diff --git a/.mailmap b/.mailmap index c69d9c734fb5e7e2268870ed680e34a0b29b9bf3..db4f2295bd9d792b47eb77aab179a9db0d968454 100644 --- a/.mailmap +++ b/.mailmap @@ -90,11 +90,16 @@ Frank Rowand Frank Zago Gao Xiang Gao Xiang +Gerald Schaefer +Gerald Schaefer +Gerald Schaefer Greg Kroah-Hartman Greg Kroah-Hartman Greg Kroah-Hartman Gregory CLEMENT Hanjun Guo +Heiko Carstens +Heiko Carstens Henk Vergonet Henrik Kretzschmar Henrik Rydberg @@ -193,6 +198,9 @@ Maxime Ripard Mayuresh Janorkar Michael Buesch Michel Dänzer +Mike Rapoport +Mike Rapoport +Mike Rapoport Miodrag Dinic Miquel Raynal Mitesh shah diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs index f6d9c2a8d52800c79a6f4563dfab2e3cf8a8e5d6..2e9ae311e02d23c6dc52e58781f89ee96c3cfe09 100644 --- a/Documentation/ABI/testing/debugfs-driver-habanalabs +++ b/Documentation/ABI/testing/debugfs-driver-habanalabs @@ -16,7 +16,16 @@ Description: Allow the root user to disable/enable in runtime the clock gating mechanism in Gaudi. Due to how Gaudi is built, the clock gating needs to be disabled in order to access the registers of the TPC and MME engines. This is sometimes needed - during debug by the user and hence the user needs this option + during debug by the user and hence the user needs this option. + The user can supply a bitmask value, each bit represents + a different engine to disable/enable its clock gating feature. + The bitmask is composed of 20 bits: + 0 - 7 : DMA channels + 8 - 11 : MME engines + 12 - 19 : TPC engines + The bit's location of a specific engine can be determined + using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values + are defined in uapi habanalabs.h file in enum gaudi_engine_id What: /sys/kernel/debug/habanalabs/hl/command_buffers Date: Jan 2019 diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst index 5fb52690002386bf9a2a570a3a85f714b6a72672..5aad534233cd807af4b3a2be583a0a28389a3ce5 100644 --- a/Documentation/admin-guide/README.rst +++ b/Documentation/admin-guide/README.rst @@ -258,7 +258,7 @@ Configuring the kernel Compiling the kernel -------------------- - - Make sure you have at least gcc 4.6 available. + - Make sure you have at least gcc 4.9 available. For more information, refer to :ref:`Documentation/process/changes.rst `. Please note that you can still run a.out user programs with this kernel. diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst index 314fa5bc2655da3b30fdde5c12e602ae2633ccc5..f28853f80089bffd0882dc6be9e1b18898f8ef2f 100644 --- a/Documentation/arm64/cpu-feature-registers.rst +++ b/Documentation/arm64/cpu-feature-registers.rst @@ -171,6 +171,7 @@ infrastructure: 3) ID_AA64PFR1_EL1 - Processor Feature Register 1 + +------------------------------+---------+---------+ | Name | bits | visible | +------------------------------+---------+---------+ @@ -181,6 +182,7 @@ infrastructure: 4) MIDR_EL1 - Main ID Register + +------------------------------+---------+---------+ | Name | bits | visible | +------------------------------+---------+---------+ diff --git a/Documentation/block/bfq-iosched.rst b/Documentation/block/bfq-iosched.rst index 0d237d4028600b387f741efb79f46a83437a39d5..19d4d1570cee75ce477e3dc81f8fd23e4ac47635 100644 --- a/Documentation/block/bfq-iosched.rst +++ b/Documentation/block/bfq-iosched.rst @@ -492,13 +492,6 @@ set max_budget to higher values than those to which BFQ would have set it with auto-tuning. 
An alternative way to achieve this goal is to just increase the value of timeout_sync, leaving max_budget equal to 0. -weights -------- - -Read-only parameter, used to show the weights of the currently active -BFQ queues. - - 4. Group scheduling with BFQ ============================ @@ -566,7 +559,7 @@ Parameters to set For each group, there is only the following parameter to set. weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the -group inside its parent. Available values: 1..10000 (default 100). The +group inside its parent. Available values: 1..1000 (default 100). The linear mapping between ioprio and weights, described at the beginning of the tunable section, is still valid, but all weights higher than IOPRIO_BE_NR*10 are mapped to ioprio 0. diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst index 2d8d2fed731720b130248caccf8badc22b5ffec2..f41620439ef349b0592d177052b7d6fa85fcf152 100644 --- a/Documentation/core-api/dma-api.rst +++ b/Documentation/core-api/dma-api.rst @@ -204,6 +204,14 @@ Returns the maximum size of a mapping for the device. The size parameter of the mapping functions like dma_map_single(), dma_map_page() and others should not be larger than the returned value. +:: + + bool + dma_need_sync(struct device *dev, dma_addr_t dma_addr); + +Returns %true if dma_sync_single_for_{device,cpu} calls are required to +transfer memory ownership. Returns %false if those calls can be skipped. + :: unsigned long diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml index 526593c8c614a06745005eaee14c8b47ba71bf92..4cc1a670c98601bd3b2f30eb265547568466956b 100644 --- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml +++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml @@ -47,6 +47,9 @@ properties: $ref: /schemas/types.yaml#/definitions/phandle-array description: Phandle to the device SRAM + iommus: + maxItems: 1 + memory-region: description: CMA pool to use for buffers allocation instead of the default diff --git a/Documentation/devicetree/bindings/sound/simple-card.yaml b/Documentation/devicetree/bindings/sound/simple-card.yaml index 8132d0c0f00a121d289f894cfd6f75b92553e9dd..35e669020296dbbd5c90a9df47d9fe62c36e88dc 100644 --- a/Documentation/devicetree/bindings/sound/simple-card.yaml +++ b/Documentation/devicetree/bindings/sound/simple-card.yaml @@ -378,6 +378,8 @@ examples: - | sound { compatible = "simple-audio-card"; + #address-cells = <1>; + #size-cells = <0>; simple-audio-card,name = "rsnd-ak4643"; simple-audio-card,format = "left_j"; @@ -391,10 +393,12 @@ examples: "ak4642 Playback", "DAI1 Playback"; dpcmcpu: simple-audio-card,cpu@0 { + reg = <0>; sound-dai = <&rcar_sound 0>; }; simple-audio-card,cpu@1 { + reg = <1>; sound-dai = <&rcar_sound 1>; }; @@ -418,6 +422,8 @@ examples: - | sound { compatible = "simple-audio-card"; + #address-cells = <1>; + #size-cells = <0>; simple-audio-card,routing = "pcm3168a Playback", "DAI1 Playback", @@ -426,6 +432,7 @@ examples: "pcm3168a Playback", "DAI4 Playback"; simple-audio-card,dai-link@0 { + reg = <0>; format = "left_j"; bitclock-master = <&sndcpu0>; frame-master = <&sndcpu0>; @@ -439,22 +446,23 @@ examples: }; simple-audio-card,dai-link@1 { + reg = <1>; format = "i2s"; bitclock-master = <&sndcpu1>; frame-master = <&sndcpu1>; convert-channels = <8>; /* TDM Split */ - sndcpu1: cpu@0 { + sndcpu1: cpu0 { sound-dai = 
<&rcar_sound 1>; }; - cpu@1 { + cpu1 { sound-dai = <&rcar_sound 2>; }; - cpu@2 { + cpu2 { sound-dai = <&rcar_sound 3>; }; - cpu@3 { + cpu3 { sound-dai = <&rcar_sound 4>; }; codec { @@ -466,6 +474,7 @@ examples: }; simple-audio-card,dai-link@2 { + reg = <2>; format = "i2s"; bitclock-master = <&sndcpu2>; frame-master = <&sndcpu2>; diff --git a/Documentation/driver-api/ptp.rst b/Documentation/driver-api/ptp.rst index a15192e323478b40de18932bf7ae77f44ea0d3d9..664838ae7776015bc923473fd85c46cf0388fadd 100644 --- a/Documentation/driver-api/ptp.rst +++ b/Documentation/driver-api/ptp.rst @@ -23,6 +23,7 @@ PTP hardware clock infrastructure for Linux + Ancillary clock features - Time stamp external events - Period output signals configurable from user space + - Low Pass Filter (LPF) access from user space - Synchronization of the Linux system time via the PPS subsystem PTP hardware clock kernel API @@ -94,3 +95,14 @@ Supported hardware - Auxiliary Slave/Master Mode Snapshot (optional interrupt) - Target Time (optional interrupt) + + * Renesas (IDT) ClockMatrix™ + + - Up to 4 independent PHC channels + - Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2) + - Programmable output periodic signals + - Programmable inputs can time stamp external triggers + - Driver and/or hardware configuration through firmware (idtcm.bin) + - LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2)) + - Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs) + - Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional) diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst index 660dbaf0b9b8cd022d9a1f5cd1a74a1444ffc720..fcda5d6ba9ac91ab52683ce81035e5ea4f1fe356 100644 --- a/Documentation/filesystems/overlayfs.rst +++ b/Documentation/filesystems/overlayfs.rst @@ -560,8 +560,8 @@ When the NFS export feature is enabled, all directory index entries are verified on mount time to check that upper file handles are not stale. This verification may cause significant overhead in some cases. -Note: the mount options index=off,nfs_export=on are conflicting and will -result in an error. +Note: the mount options index=off,nfs_export=on are conflicting for a +read-write mount and will result in an error. Testsuite diff --git a/Documentation/networking/arcnet.rst b/Documentation/networking/arcnet.rst index e93d9820f0f1715c60fc47b9796c90f813d14f97..82fce606c0f0bc88edd87b82da972e682983c6ca 100644 --- a/Documentation/networking/arcnet.rst +++ b/Documentation/networking/arcnet.rst @@ -434,7 +434,7 @@ can set up your network then: ifconfig arc0 insight route add insight arc0 route add freedom arc0 /* I would use the subnet here (like I said - to to in "single protocol" above), + to in "single protocol" above), but the rest of the subnet unfortunately lies across the PPP link on freedom, which confuses diff --git a/Documentation/networking/ax25.rst b/Documentation/networking/ax25.rst index 824afd7002dbeda6f55ad4cc4f3a2610ad5d5e57..f060cfb1445a38a6e0d81c68e5184815976b7fed 100644 --- a/Documentation/networking/ax25.rst +++ b/Documentation/networking/ax25.rst @@ -6,7 +6,7 @@ AX.25 To use the amateur radio protocols within Linux you will need to get a suitable copy of the AX.25 Utilities. 
More detailed information about -AX.25, NET/ROM and ROSE, associated programs and and utilities can be +AX.25, NET/ROM and ROSE, associated programs and utilities can be found on http://www.linux-ax25.org. There is an active mailing list for discussing Linux amateur radio matters diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index 465a8b251bfeca4dc865d568ca368d38e8e76d02..b9d04ee6dac142c73a2eb7efce741e59da005726 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to leverage the UDP based load balancing capability of different networks. MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them. -The Bareudp tunnel module provides a generic L3 encapsulation tunnelling -support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside -a UDP tunnel. +The Bareudp tunnel module provides a generic L3 encapsulation support for +tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel. Special Handling ---------------- @@ -26,7 +25,7 @@ Usage 1) Device creation & deletion - a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847. + a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc This creates a bareudp tunnel device which tunnels L3 traffic with ethertype 0x8847 (MPLS traffic). The destination port of the UDP header will be set to @@ -34,14 +33,21 @@ Usage b) ip link delete bareudp0 -2) Device creation with multiple proto mode enabled +2) Device creation with multiproto mode enabled -There are two ways to create a bareudp device for MPLS & IP with multiproto mode -enabled. +The multiproto mode allows bareudp tunnels to handle several protocols of the +same family. It is currently only available for IP and MPLS. This mode has to +be enabled explicitly with the "multiproto" flag. - a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto + a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto - b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls + For an IPv4 tunnel the multiproto mode allows the tunnel to also handle + IPv6. + + b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto + + For MPLS, the multiproto mode allows the tunnel to handle both unicast + and multicast MPLS packets. 3) Device Usage diff --git a/Documentation/networking/can_ucan_protocol.rst b/Documentation/networking/can_ucan_protocol.rst index 4cef88d24fc7569536b58fd5378574aa5f9a9c3a..638ac1ee7914f1b6104d8b7119a9d94ef253f264 100644 --- a/Documentation/networking/can_ucan_protocol.rst +++ b/Documentation/networking/can_ucan_protocol.rst @@ -144,7 +144,7 @@ UCAN_COMMAND_SET_BITTIMING *Host2Dev; mandatory* -Setup bittiming by sending the the structure +Setup bittiming by sending the structure ``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for details) @@ -232,7 +232,7 @@ UCAN_IN_TX_COMPLETE zero The CAN device has sent a message to the CAN bus. It answers with a -list of of tuples . +list of tuples . The echo-id identifies the frame from (echos the id from a previous UCAN_OUT_TX message). 
The flag indicates the result of the diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst index 1e3f3ffee2480ab4749c8adb08cb811ff538f029..2014307fbe63428d6751d83f59bb941190259aff 100644 --- a/Documentation/networking/devlink/devlink-trap.rst +++ b/Documentation/networking/devlink/devlink-trap.rst @@ -486,6 +486,10 @@ narrow. The description of these groups must be added to the following table: - Contains packet traps for packets that should be locally delivered after routing, but do not match more specific packet traps (e.g., ``ipv4_bgp``) + * - ``external_delivery`` + - Contains packet traps for packets that should be routed through an + external interface (e.g., management interface) that does not belong to + the same device (e.g., switch ASIC) as the ingress interface * - ``ipv6`` - Contains packet traps for various IPv6 control packets (e.g., Router Advertisements) diff --git a/Documentation/networking/dsa/dsa.rst b/Documentation/networking/dsa/dsa.rst index 563d56c6a25c924e41d513272aee32d57ab535c1..a8d15dd2b42b72eef0f9cbe55b7f431033b1be2a 100644 --- a/Documentation/networking/dsa/dsa.rst +++ b/Documentation/networking/dsa/dsa.rst @@ -95,7 +95,7 @@ Ethernet switch. Networking stack hooks ---------------------- -When a master netdev is used with DSA, a small hook is placed in in the +When a master netdev is used with DSA, a small hook is placed in the networking stack is in order to have the DSA subsystem process the Ethernet switch specific tagging protocol. DSA accomplishes this by registering a specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index b72f89d5694c9064460b7617cb5946333a2c1d22..837d51f9e1fab7c0999a51184f95971fb43c1b9b 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -741,7 +741,7 @@ tcp_fastopen - INTEGER Default: 0x1 - Note that that additional client or server features are only + Note that additional client or server features are only effective if the basic support (0x1 and 0x2) are enabled respectively. tcp_fastopen_blackhole_timeout_sec - INTEGER diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst index be36c4600e8f5b9995d4a4bee022c036c01efe6c..2afccc63856ee07357d6d800d9f200b3f6549d94 100644 --- a/Documentation/networking/ipvs-sysctl.rst +++ b/Documentation/networking/ipvs-sysctl.rst @@ -114,7 +114,7 @@ drop_entry - INTEGER modes (when there is no enough available memory, the strategy is enabled and the variable is automatically set to 2, otherwise the strategy is disabled and the variable is set to - 1), and 3 means that that the strategy is always enabled. + 1), and 3 means that the strategy is always enabled. drop_packet - INTEGER - 0 - disabled (default) diff --git a/Documentation/networking/rxrpc.rst b/Documentation/networking/rxrpc.rst index 68552b92dc44249862c17b22cef6313be2968608..39c2249c7aa78d2fbda659d2f49676e60ede034b 100644 --- a/Documentation/networking/rxrpc.rst +++ b/Documentation/networking/rxrpc.rst @@ -186,7 +186,7 @@ About the AF_RXRPC driver: time [tunable] after the last connection using it discarded, in case a new connection is made that could use it. 
- (#) A client-side connection is only shared between calls if they have have + (#) A client-side connection is only shared between calls if they have the same key struct describing their security (and assuming the calls would otherwise share the connection). Non-secured calls would also be able to share connections with each other. diff --git a/Documentation/powerpc/vas-api.rst b/Documentation/powerpc/vas-api.rst index 1217c2f1595e8c3c9bfcf568f545ab129dcb6682..788dc8375a0e444ac5928712565421aaa81922ac 100644 --- a/Documentation/powerpc/vas-api.rst +++ b/Documentation/powerpc/vas-api.rst @@ -213,7 +213,7 @@ request buffers are not in memory. The operating system handles the fault by updating CSB with the following data: csb.flags = CSB_V; - csb.cc = CSB_CC_TRANSLATION; + csb.cc = CSB_CC_FAULT_ADDRESS; csb.ce = CSB_CE_TERMINATION; csb.address = fault_address; diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst index 5cfb54c2aaa6e639e771d65a7682cef85c53c868..8f68e728ae6ba5f3550763cfda0b293cb4601781 100644 --- a/Documentation/process/changes.rst +++ b/Documentation/process/changes.rst @@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils. ====================== =============== ======================================== Program Minimal version Command to check the version ====================== =============== ======================================== -GNU C 4.8 gcc --version +GNU C 4.9 gcc --version GNU make 3.81 make --version binutils 2.23 ld -v flex 2.5.35 flex --version diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst index 2657a55c6f120d1c01e3f168cc189d3483264269..1bee6f8affdb8f371d34094bf66111470c25a05b 100644 --- a/Documentation/process/coding-style.rst +++ b/Documentation/process/coding-style.rst @@ -319,6 +319,26 @@ If you are afraid to mix up your local variable names, you have another problem, which is called the function-growth-hormone-imbalance syndrome. See chapter 6 (Functions). +For symbol names and documentation, avoid introducing new usage of +'master / slave' (or 'slave' independent of 'master') and 'blacklist / +whitelist'. + +Recommended replacements for 'master / slave' are: + '{primary,main} / {secondary,replica,subordinate}' + '{initiator,requester} / {target,responder}' + '{controller,host} / {device,worker,proxy}' + 'leader / follower' + 'director / performer' + +Recommended replacements for 'blacklist/whitelist' are: + 'denylist / allowlist' + 'blocklist / passlist' + +Exceptions for introducing new usage is to maintain a userspace ABI/API, +or when updating code for an existing (as of 2020) hardware or protocol +specification that mandates those terms. For new specifications +translate specification usage of the terminology to the kernel coding +standard where possible. 
5) Typedefs ----------- diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 426f94582b7a1a58a21e7e703beb3bc052440f90..320788f81a051ac8db3703a2e791ff8879d05915 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -4339,14 +4339,15 @@ Errors: #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 struct kvm_vmx_nested_state_hdr { - __u32 flags; __u64 vmxon_pa; __u64 vmcs12_pa; - __u64 preemption_timer_deadline; struct { __u16 flags; } smm; + + __u32 flags; + __u64 preemption_timer_deadline; }; struct kvm_vmx_nested_state_data { diff --git a/MAINTAINERS b/MAINTAINERS index 1d4aa7f942de145be67b32a84dae1890bc6d8aaf..4e2698cc7e23960a5102f6abe34ac215e88bdb87 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -782,7 +782,7 @@ F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h F: include/linux/mfd/altera-a10sr.h ALTERA TRIPLE SPEED ETHERNET DRIVER -M: Thor Thayer +M: Joyce Ooi L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/altera/ @@ -1425,7 +1425,7 @@ F: arch/arm*/include/asm/perf_event.h F: arch/arm*/kernel/hw_breakpoint.c F: arch/arm*/kernel/perf_* F: arch/arm/oprofile/common.c -F: drivers/perf/* +F: drivers/perf/ F: include/linux/perf/arm_pmu.h ARM PORT @@ -2929,6 +2929,7 @@ F: include/uapi/linux/atm* ATMEL MACB ETHERNET DRIVER M: Nicolas Ferre +M: Claudiu Beznea S: Supported F: drivers/net/ethernet/cadence/ @@ -3306,7 +3307,7 @@ X: arch/riscv/net/bpf_jit_comp32.c BPF JIT for S390 M: Ilya Leoshkevich -M: Heiko Carstens +M: Heiko Carstens M: Vasily Gorbik L: netdev@vger.kernel.org L: bpf@vger.kernel.org @@ -5021,7 +5022,6 @@ F: drivers/mfd/da91??-*.c F: drivers/pinctrl/pinctrl-da90??.c F: drivers/power/supply/da9052-battery.c F: drivers/power/supply/da91??-*.c -F: drivers/regulator/da903x.c F: drivers/regulator/da9???-regulator.[ch] F: drivers/regulator/slg51000-regulator.[ch] F: drivers/rtc/rtc-da90??.c @@ -5111,7 +5111,7 @@ M: Vinod Koul L: dmaengine@vger.kernel.org S: Maintained Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ -T: git git://git.infradead.org/users/vkoul/slave-dma.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git F: Documentation/devicetree/bindings/dma/ F: Documentation/driver-api/dmaengine/ F: drivers/dma/ @@ -6956,6 +6956,7 @@ M: Timur Tabi M: Nicolin Chen M: Xiubo Li R: Fabio Estevam +R: Shengjiu Wang L: alsa-devel@alsa-project.org (moderated for non-subscribers) L: linuxppc-dev@lists.ozlabs.org S: Maintained @@ -9305,6 +9306,17 @@ F: Documentation/kbuild/kconfig* F: scripts/Kconfig.include F: scripts/kconfig/ +KCOV +R: Dmitry Vyukov +R: Andrey Konovalov +L: kasan-dev@googlegroups.com +S: Maintained +F: Documentation/dev-tools/kcov.rst +F: include/linux/kcov.h +F: include/uapi/linux/kcov.h +F: kernel/kcov.c +F: scripts/Makefile.kcov + KCSAN M: Marco Elver R: Dmitry Vyukov @@ -11240,7 +11252,7 @@ S: Maintained F: drivers/crypto/atmel-ecc.* MICROCHIP I2C DRIVER -M: Ludovic Desroches +M: Codrin Ciubotariu L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/busses/i2c-at91-*.c @@ -11333,17 +11345,17 @@ F: drivers/iio/adc/at91-sama5d2_adc.c F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER -M: Nicolas Ferre +M: Claudiu Beznea S: Supported F: drivers/power/reset/at91-sama5d2_shdwc.c MICROCHIP SPI DRIVER -M: Nicolas Ferre +M: Tudor Ambarus S: Supported F: drivers/spi/spi-atmel.* MICROCHIP SSC DRIVER -M: Nicolas Ferre +M: Codrin Ciubotariu L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 
S: Supported F: drivers/misc/atmel-ssc.c @@ -14176,7 +14188,8 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.txt F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c QUALCOMM GENERIC INTERFACE I2C DRIVER -M: Alok Chauhan +M: Akash Asthana +M: Mukesh Savaliya L: linux-i2c@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Supported @@ -14831,7 +14844,7 @@ S: Maintained F: drivers/video/fbdev/savage/ S390 -M: Heiko Carstens +M: Heiko Carstens M: Vasily Gorbik M: Christian Borntraeger L: linux-s390@vger.kernel.org @@ -14862,7 +14875,8 @@ F: drivers/s390/block/dasd* F: include/linux/dasd_mod.h S390 IOMMU (PCI) -M: Gerald Schaefer +M: Matthew Rosato +M: Gerald Schaefer L: linux-s390@vger.kernel.org S: Supported W: http://www.ibm.com/developerworks/linux/linux390/ @@ -14890,7 +14904,7 @@ F: drivers/s390/net/ S390 PCI SUBSYSTEM M: Niklas Schnelle -M: Gerald Schaefer +M: Gerald Schaefer L: linux-s390@vger.kernel.org S: Supported W: http://www.ibm.com/developerworks/linux/linux390/ @@ -17513,7 +17527,7 @@ F: Documentation/admin-guide/ufs.rst F: fs/ufs/ UHID USERSPACE HID IO DRIVER -M: David Herrmann +M: David Rheinsberg L: linux-input@vger.kernel.org S: Maintained F: drivers/hid/uhid.c @@ -18472,7 +18486,7 @@ S: Maintained F: drivers/rtc/rtc-sd3078.c WIIMOTE HID DRIVER -M: David Herrmann +M: David Rheinsberg L: linux-input@vger.kernel.org S: Maintained F: drivers/hid/hid-wiimote* diff --git a/Makefile b/Makefile index fe0164a654c769200612a921b1432231c69580f3..24a4c1b97bb07cc47028722195b20d0306ca0836 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = NAME = Kleptomaniac Octopus # *DOCUMENTATION* @@ -567,7 +567,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) ifneq ($(CROSS_COMPILE),) CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) -CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) +CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) 
endif ifneq ($(GCC_TOOLCHAIN),) @@ -1754,7 +1754,7 @@ PHONY += descend $(build-dirs) descend: $(build-dirs) $(build-dirs): prepare $(Q)$(MAKE) $(build)=$@ \ - single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \ + single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \ need-builtin=1 need-modorder=1 clean-dirs := $(addprefix _clean_, $(clean-dirs)) diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi index 7d19395e30c810daea6a7f5df95d9545b4c3e1b8..906ac29f017db597f9ae993893cf30bd1cf58699 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -1540,8 +1540,9 @@ target-module@cc000 { /* 0x481cc000, ap 50 46.0 */ reg = <0xcc020 0x4>; reg-names = "rev"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; - clock-names = "fck"; + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>, + <&dcan0_fck>; + clock-names = "fck", "osc"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0xcc000 0x2000>; @@ -1549,6 +1550,8 @@ target-module@cc000 { /* 0x481cc000, ap 50 46.0 */ dcan0: can@0 { compatible = "ti,am4372-d_can", "ti,am3352-d_can"; reg = <0x0 0x2000>; + clocks = <&dcan0_fck>; + clock-names = "fck"; syscon-raminit = <&scm_conf 0x644 0>; interrupts = ; status = "disabled"; @@ -1560,8 +1563,9 @@ target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */ reg = <0xd0020 0x4>; reg-names = "rev"; /* Domains (P, C): per_pwrdm, l4ls_clkdm */ - clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; - clock-names = "fck"; + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>, + <&dcan1_fck>; + clock-names = "fck", "osc"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0xd0000 0x2000>; @@ -1569,6 +1573,8 @@ target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */ dcan1: can@0 { compatible = "ti,am4372-d_can", "ti,am3352-d_can"; reg = <0x0 0x2000>; + clocks = <&dcan1_fck>; + clock-name = "fck"; syscon-raminit = <&scm_conf 0x644 1>; interrupts = ; status = "disabled"; diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 348116501aa2b02f4ef9eedc7e23d1de815483da..9b1a24cc5e91f2f52c32eeb248831f60d13040e0 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -342,7 +342,8 @@ gateclk: clock-gating-control@18220 { comphy: phy@18300 { compatible = "marvell,armada-380-comphy"; - reg = <0x18300 0x100>; + reg-names = "comphy", "conf"; + reg = <0x18300 0x100>, <0x18460 4>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi index c38e86eedcc011a94ae4f4af3e56099dffe11296..8c33510c9519d620c0a91c27dc734e0d5b857e86 100644 --- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi @@ -110,7 +110,7 @@ sound-digital { simple-audio-card,frame-master = <&sound_codec>; sound_cpu: simple-audio-card,cpu { - sound-dai = <&ssi2>; + sound-dai = <&ssi1>; }; sound_codec: simple-audio-card,codec { diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi index 756f3a9f1b4fed2cafb4766ea63a095890c6b318..12997dae35d976c463f87e3f7d85bb27ced494c8 100644 --- a/arch/arm/boot/dts/imx6qdl-icore.dtsi +++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi @@ -397,7 +397,7 @@ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1 pinctrl_usbotg: usbotggrp { fsl,pins = < - MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 + MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059 >; }; @@ -409,6 +409,7 @@ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17070 
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070 MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070 MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070 + MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0 >; }; diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index 825924448ab4a2ebbc029bc2f04f5aeb382fc1b0..14fd1de52a68699b65f6393f49a5b976a50e1e86 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts @@ -99,7 +99,7 @@ ethphy1: ethernet-phy@1 { &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy0>; fsl,magic-packet; status = "okay"; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index 3e5fb72f21fc88fdab791ef91eca069a33f1793d..c99aa273c2962a3e93cd6dc40a43e46821379822 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi @@ -213,7 +213,7 @@ ethphy2: ethernet-phy@2 { &fec2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet2>; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <ðphy2>; status = "okay"; }; diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts index db640bab8c1d4a150287e89bcb9aae55473fecf9..8b3d64c913d868be5d0f1eb886fe1a75bf78da89 100644 --- a/arch/arm/boot/dts/keystone-k2g-evm.dts +++ b/arch/arm/boot/dts/keystone-k2g-evm.dts @@ -402,7 +402,7 @@ ethphy0: ethernet-phy@0 { &gbe0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-id"; + phy-mode = "rgmii-rxid"; status = "okay"; }; diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index ae89deaa8c9c4f9ae2f0e2b98463f3fefb293e7e..91129dc70d835da505ce2491003f916e998fe4a2 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -11,7 +11,7 @@ / { #size-cells = <1>; interrupt-parent = <&gic>; - L2: l2-cache-controller@c4200000 { + L2: cache-controller@c4200000 { compatible = "arm,pl310-cache"; reg = <0xc4200000 0x1000>; cache-unified; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 4089d97405c950e0148180334d0e5fbe88154df8..3dbcae3d60d285af39798a7746ab32f6cb7f71e6 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -105,6 +105,14 @@ proximity_sensor { linux,code = ; linux,can-disable; }; + + machine_cover { + label = "Machine Cover"; + gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ + linux,input-type = ; + linux,code = ; + linux,can-disable; + }; }; isp1707: isp1707 { @@ -819,10 +827,6 @@ &mmc1 { pinctrl-0 = <&mmc1_pins>; vmmc-supply = <&vmmc1>; bus-width = <4>; - /* For debugging, it is often good idea to remove this GPIO. - It means you can remove back cover (to reboot by removing - battery) and still use the MMC card. 
*/ - cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ }; /* most boards use vaux3, only some old versions use vmmc2 instead */ diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index c2b54af417a2ff0355297804582141c12f0279fc..78f3267d9cbf202a17784e2f04aa3966a1920b2c 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -726,7 +726,7 @@ ocram-ecc@ffd08144 { }; }; - L2: l2-cache@fffef000 { + L2: cache-controller@fffef000 { compatible = "arm,pl310-cache"; reg = <0xfffef000 0x1000>; interrupts = <0 38 0x04>; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index 3b8571b8b41290d358692a639f700392bcb2e1fb..8f614c4b0e3ebce0f8adc2ae4694280bcdf2b223 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -636,7 +636,7 @@ sdr: sdr@ffcfb100 { reg = <0xffcfb100 0x80>; }; - L2: l2-cache@fffff000 { + L2: cache-controller@fffff000 { compatible = "arm,pl310-cache"; reg = <0xfffff000 0x1000>; interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index bf531efc0610e7f0921c2ed6b9e59fa7e148e233..0f95a6ef8543a77513cabf78c131d8afa3589565 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -198,7 +198,7 @@ reserved-memory { default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi index e6b036734a643a6714554d176fe570f7dc574538..c2b4fbf552a384014d3a326311bbae7587892210 100644 --- a/arch/arm/boot/dts/sun5i.dtsi +++ b/arch/arm/boot/dts/sun5i.dtsi @@ -117,7 +117,7 @@ reserved-memory { default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index ffe1d10a1a84674f9ac4479b6c392c25023ef0b8..6d6a37940db2aa6e14ab765f37dd0d14cdeb577f 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -181,7 +181,7 @@ reserved-memory { default-pool { compatible = "shared-dma-pool"; size = <0x6000000>; - alloc-ranges = <0x4a000000 0x6000000>; + alloc-ranges = <0x40000000 0x10000000>; reusable; linux,cma-default; }; diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index f44f448537f215119ddec04890f6f05bd8307238..1a3eedbac4a20ac5d2ca7f29cc35f7f9b34de6d9 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -5,6 +5,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include + /* * Same as asm-generic/percpu.h, except that we store the per cpu offset * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index c036a4a2f8e213fb650a1759781afa6cbba31005..a1570c8bab25acd9bdbfbc7f26db705fa923f873 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -31,15 +31,6 @@ #if defined(__APCS_26__) #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 #endif -/* - * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854 - * miscompiles find_get_entry(), and can result in EXT3 and EXT4 - * filesystem corruption (possibly other FS too). 
- */ -#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803 -#error Your compiler is too buggy; it is known to miscompile kernels -#error and result in filesystem corruption and oopses. -#endif int main(void) { diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 02ca7adf537545fa14ca0de9a0af21c3f5ea780b..7fff88e612525cfba94b4987ee526f58285debd5 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger. */ - if (is_default_overflow_handler(wp)) - enable_single_step(wp, instruction_pointer(regs)); + if (!is_default_overflow_handler(wp)) + goto unlock; +step: + enable_single_step(wp, instruction_pointer(regs)); unlock: rcu_read_unlock(); } diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c index 6bfdca4769a70cd932302707733929f9d0a54816..fddd08a6e063e362acfdf6d3ed433bb1ad1f21df 100644 --- a/arch/arm/kernel/vdso.c +++ b/arch/arm/kernel/vdso.c @@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr) if (!cntvct_ok) { vdso_nullpatch_one(&einfo, "__vdso_gettimeofday"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime"); + vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64"); } } diff --git a/arch/arm/mach-imx/devices/devices-common.h b/arch/arm/mach-imx/devices/devices-common.h index 2a685adec1df754979792138a6507d58e2b3086c..ae84c08e11fa4acd85dd0426540299416808b9f6 100644 --- a/arch/arm/mach-imx/devices/devices-common.h +++ b/arch/arm/mach-imx/devices/devices-common.h @@ -289,6 +289,6 @@ struct platform_device *__init imx_add_spi_imx( const struct spi_imx_master *pdata); struct platform_device *imx_add_imx_dma(char *name, resource_size_t iobase, - int irq, int irq_err); + int irq); struct platform_device *imx_add_imx_sdma(char *name, resource_size_t iobase, int irq, struct sdma_platform_data *pdata); diff --git a/arch/arm/mach-imx/devices/platform-gpio-mxc.c b/arch/arm/mach-imx/devices/platform-gpio-mxc.c index 78628ef126724c201c2cd4b152e76ea10593dc1f..355de845224cb02277db453ccefe6b51cf8d829a 100644 --- a/arch/arm/mach-imx/devices/platform-gpio-mxc.c +++ b/arch/arm/mach-imx/devices/platform-gpio-mxc.c @@ -24,7 +24,8 @@ struct platform_device *__init mxc_register_gpio(char *name, int id, .flags = IORESOURCE_IRQ, }, }; + unsigned int nres; - return platform_device_register_resndata(&mxc_aips_bus, - name, id, res, ARRAY_SIZE(res), NULL, 0); + nres = irq_high ? 
ARRAY_SIZE(res) : ARRAY_SIZE(res) - 1; + return platform_device_register_resndata(&mxc_aips_bus, name, id, res, nres, NULL, 0); } diff --git a/arch/arm/mach-imx/devices/platform-imx-dma.c b/arch/arm/mach-imx/devices/platform-imx-dma.c index 26b47b36257bcb2daca44e003f02a313fe8f84c7..12656f24ad0d560ae0a5732bedb64c2d91de641a 100644 --- a/arch/arm/mach-imx/devices/platform-imx-dma.c +++ b/arch/arm/mach-imx/devices/platform-imx-dma.c @@ -6,7 +6,7 @@ #include "devices-common.h" struct platform_device __init __maybe_unused *imx_add_imx_dma(char *name, - resource_size_t iobase, int irq, int irq_err) + resource_size_t iobase, int irq) { struct resource res[] = { { @@ -17,10 +17,6 @@ struct platform_device __init __maybe_unused *imx_add_imx_dma(char *name, .start = irq, .end = irq, .flags = IORESOURCE_IRQ, - }, { - .start = irq_err, - .end = irq_err, - .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c index 50a2edac8513a7538166eeb8f9ac5e112d01aa11..b834026e46151928e4b1caecf02fedd08bf9265e 100644 --- a/arch/arm/mach-imx/mm-imx21.c +++ b/arch/arm/mach-imx/mm-imx21.c @@ -78,8 +78,7 @@ void __init imx21_soc_init(void) mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); pinctrl_provide_dummies(); - imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR, - MX21_INT_DMACH0, 0); /* No ERR irq */ + imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR, MX21_INT_DMACH0); platform_device_register_simple("imx21-audmux", 0, imx21_audmux_res, ARRAY_SIZE(imx21_audmux_res)); } diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c index 4e412514002529202991924ecdb065db70ee939f..2717614f101d495f27a8ee780898efb59c27b36d 100644 --- a/arch/arm/mach-imx/mm-imx27.c +++ b/arch/arm/mach-imx/mm-imx27.c @@ -79,8 +79,7 @@ void __init imx27_soc_init(void) mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); pinctrl_provide_dummies(); - imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR, - MX27_INT_DMACH0, 0); /* No ERR irq */ + imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR, MX27_INT_DMACH0); /* imx27 has the imx21 type audmux */ platform_device_register_simple("imx21-audmux", 0, imx27_audmux_res, ARRAY_SIZE(imx27_audmux_res)); diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c630457bb228e70bc572942ec62b217d5127ef18..15b29a179c8adb0f693fbb5f665414e6f32c0190 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -3435,7 +3435,7 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh, regs = ioremap(data->module_pa, data->module_size); if (!regs) - return -ENOMEM; + goto out_free_sysc; } /* @@ -3445,13 +3445,13 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh, if (oh->class->name && strcmp(oh->class->name, data->name)) { class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL); if (!class) - return -ENOMEM; + goto out_unmap; } if (list_empty(&oh->slave_ports)) { oi = kcalloc(1, sizeof(*oi), GFP_KERNEL); if (!oi) - return -ENOMEM; + goto out_free_class; /* * Note that we assume interconnect interface clocks will be @@ -3478,6 +3478,14 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh, spin_unlock_irqrestore(&oh->_lock, flags); return 0; + +out_free_class: + kfree(class); +out_unmap: + iounmap(regs); +out_free_sysc: + kfree(sysc); + return -ENOMEM; } static const struct omap_hwmod_reset omap24xx_reset_quirks[] = { diff --git a/arch/arm/mm/mmu.c 
b/arch/arm/mm/mmu.c index 628028bfbb923a817ef4218e232506afad98bc82..bcd82614c25d0c45f7efe69674e6a95fd0aaa4ea 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md, pud_t *pud; p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual); - if (!WARN_ON(!p4d)) + if (WARN_ON(!p4d)) return; pud = pud_alloc(mm, p4d, md->virtual); if (WARN_ON(!pud)) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index a0d94d063fa81c2701c7bcc4c8d415819a5fa843..70f5905954ddeb2f3f00de0d45d49293d5ac5456 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -137,7 +137,7 @@ export TEXT_OFFSET core-y += arch/arm64/ libs-y := arch/arm64/lib/ $(libs-y) -core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a +libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a # Default target when executing plain make boot := arch/arm64/boot diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index 78b1361dfbb963b3fd94c92cd714df99dd488203..9ce78a7b117d20e26441eda4da6b0987c13975ba 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -161,6 +161,7 @@ video-codec@1c0e000 { resets = <&ccu RST_BUS_VE>; interrupts = ; allwinner,sram = <&ve_sram 1>; + iommus = <&iommu 3>; }; gpu: gpu@1800000 { diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index d1fc9c2055f4904c19e7661be8c1cb36082423ee..9498d1de730ceb43d519baeb415183b82ec35add 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -77,7 +77,7 @@ psci { method = "smc"; }; - intc: intc@fffc1000 { + intc: interrupt-controller@fffc1000 { compatible = "arm,gic-400", "arm,cortex-a15-gic"; #interrupt-cells = <3>; interrupt-controller; @@ -302,7 +302,7 @@ mmc: dwmmc0@ff808000 { status = "disabled"; }; - nand: nand@ffb90000 { + nand: nand-controller@ffb90000 { #address-cells = <1>; #size-cells = <0>; compatible = "altr,socfpga-denali-nand"; @@ -445,7 +445,7 @@ timer3: timer3@ffd00100 { clock-names = "timer"; }; - uart0: serial0@ffc02000 { + uart0: serial@ffc02000 { compatible = "snps,dw-apb-uart"; reg = <0xffc02000 0x100>; interrupts = <0 108 4>; @@ -456,7 +456,7 @@ uart0: serial0@ffc02000 { status = "disabled"; }; - uart1: serial1@ffc02100 { + uart1: serial@ffc02100 { compatible = "snps,dw-apb-uart"; reg = <0xffc02100 0x100>; interrupts = <0 109 4>; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index f6c4a15079d36d856b2a060f1cc2109cd78e75ee..feadd21bc0dc1d501b288af4d60312ec672c4a14 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -155,6 +155,7 @@ rtc@68 { }; &qspi { + status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts index 9946515b8afd4aa4bef8db6cc3b000d14d769367..c07966740e1465facc1bb04b1ad4600257a9a8e5 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts @@ -188,6 +188,7 @@ rtc@68 { }; &qspi { + status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; @@ -211,12 +212,12 @@ partitions { qspi_boot: 
partition@0 { label = "Boot and fpga data"; - reg = <0x0 0x034B0000>; + reg = <0x0 0x03FE0000>; }; - qspi_rootfs: partition@4000000 { + qspi_rootfs: partition@3FE0000 { label = "Root Filesystem - JFFS2"; - reg = <0x034B0000 0x0EB50000>; + reg = <0x03FE0000 0x0C020000>; }; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts index 6a226faab18338f69c77c2470221454383eaf91f..9e43f4dca90dd92637527b70c7bc9c9daa87b05b 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-libretech-ac.dts @@ -10,7 +10,7 @@ #include #include -#include "meson-gxl-s905x.dtsi" +#include "meson-gxl-s805x.dtsi" / { compatible = "libretech,aml-s805x-ac", "amlogic,s805x", diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts index 867e30f1d62b8d6d8ac3501e579750d0c8465e94..eb7f5a3fefd4d9e4ebb79945675701abc2ed0e04 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x-p241.dts @@ -9,7 +9,7 @@ #include -#include "meson-gxl-s905x.dtsi" +#include "meson-gxl-s805x.dtsi" / { compatible = "amlogic,p241", "amlogic,s805x", "amlogic,meson-gxl"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi new file mode 100644 index 0000000000000000000000000000000000000000..f9d705648426e78995525c579e18ff9cce3d3e78 --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2020 BayLibre SAS + * Author: Neil Armstrong + */ + +#include "meson-gxl-s905x.dtsi" + +/ { + compatible = "amlogic,s805x", "amlogic,meson-gxl"; +}; + +/* The S805X Package doesn't seem to handle the 744MHz OPP correctly */ +&mali { + assigned-clocks = <&clkc CLKID_MALI_0_SEL>, + <&clkc CLKID_MALI_0>, + <&clkc CLKID_MALI>; /* Glitch free mux */ + assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>, + <0>, /* Do Nothing */ + <&clkc CLKID_MALI_0>; + assigned-clock-rates = <0>, /* Do Nothing */ + <666666666>, + <0>; /* Do Nothing */ +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index fc59c8534c0f7dcaad2c8f4ca1d4b4b16c695636..6c8b189884ca5b7c0272fd6a34b560fbb9b2cf8d 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -337,6 +337,11 @@ clkc: clock-controller { }; }; +&hwrng { + clocks = <&clkc CLKID_RNG0>; + clock-names = "core"; +}; + &i2c_A { clocks = <&clkc CLKID_I2C>; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts index 51d948323bfdd54d1cb5db13adbfbfa961246ef0..92f478def7234f92dc50ab62c8c0ae3e8ba0f2c6 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts +++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts @@ -98,6 +98,7 @@ &watchdog0 { }; &qspi { + status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index c8243da710414d6ae9f3155e791e920e821434f7..eb01cc96ba7a3a47315f8d1c5808b5c0a709979a 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -454,10 +454,7 @@ &cp1_eth2 { status = "okay"; phy-mode = "2500base-x"; 
phys = <&cp1_comphy5 2>; - fixed-link { - speed = <2500>; - full-duplex; - }; + managed = "in-band-status"; }; &cp1_spi1 { diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 883e8bace3ed2567e9d8dbdb3e8567e415ab5a5d..2ca7ba69c3189754d83a139124119454c1e92ea4 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -194,7 +194,7 @@ CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_PCI_AARDVARK=y CONFIG_PCI_TEGRA=y -CONFIG_PCIE_RCAR=y +CONFIG_PCIE_RCAR_HOST=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_XGENE=y CONFIG_PCIE_ALTERA=y diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 12f0eb56a1cc30e5a38c6db1d0342e47a0e3908d..619db9b4c9d5c0434e321736c7a5fec9dc6a9b1c 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } "663:\n\t" \ newinstr "\n" \ "664:\n\t" \ - ".previous\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ + ".org . - (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ ".endif\n" #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index 7ae54d7d333a5ed5563543df487b8f97662669bb..9f0ec21d6327f49b8e06549ebfb2e218733a4638 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround { u64 (*read_cntvct_el0)(void); int (*set_next_event_phys)(unsigned long, struct clock_event_device *); int (*set_next_event_virt)(unsigned long, struct clock_event_device *); + bool disable_compat_vdso; }; DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *, diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index b6f7bc6da5fb31f2216d3d6aa3b2f428d66cf3b4..93a161b3bf3fee9476811665296c10d9c3b919a4 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index e5ceea213e39a580c932e16b994922de3cfd7915..0b298f48f5bfa231aa2b398d48acc0dab0af221e 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -109,6 +109,8 @@ void disable_debug_monitors(enum dbg_active_el el); void user_rewind_single_step(struct task_struct *task); void user_fastforward_single_step(struct task_struct *task); +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task); void kernel_enable_single_step(struct pt_regs *regs); void kernel_disable_single_step(void); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index c3e6fcc664b1b6ddacbe7347e5b42614cd2516f3..e21d4a01372fe7413392a866c7f1df6c66354257 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -380,9 +380,14 @@ struct kvm_vcpu_arch { #define vcpu_has_sve(vcpu) (system_supports_sve() && \ 
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) -#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \ - system_supports_generic_auth()) && \ - ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)) +#ifdef CONFIG_ARM64_PTR_AUTH +#define vcpu_has_ptrauth(vcpu) \ + ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \ + cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \ + (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH) +#else +#define vcpu_has_ptrauth(vcpu) false +#endif #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 2e7e0f452301820efc4ccf45730cd5d627778d65..4d867c6446c4844c035a1a38d5660e5b70f7085c 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h @@ -67,7 +67,7 @@ extern bool arm64_use_ng_mappings; #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) -#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) +#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN) #define PAGE_S2_MEMATTR(attr) \ ({ \ diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index ea268d88b6f7e6e5318c8c9671dedb740b0d6dc1..a0c8a0b652593b66fb8e909627d92af15c0f2cde 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -30,7 +30,6 @@ #include #include #include -#include DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 65299a2dcf9cd3d0c233a74e28ef8f7f6d146bc8..cfc0672013f672aaf77a18b1c2f3a7a5e692266e 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h @@ -34,6 +34,10 @@ static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { unsigned long error = regs->regs[0]; + + if (is_compat_thread(task_thread_info(task))) + error = sign_extend64(error, 31); + return IS_ERR_VALUE(error) ? error : 0; } @@ -47,7 +51,13 @@ static inline void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, int error, long val) { - regs->regs[0] = (long) error ? 
error : val; + if (error) + val = error; + + if (is_compat_thread(task_thread_info(task))) + val = lower_32_bits(val); + + regs->regs[0] = val; } #define SYSCALL_MAX_ARGS 6 diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 6ea8b6a26ae9b956bbdeafb36203e63022092867..5e784e16ee8958d259c57d9b274626ad96f5d7c2 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -93,6 +93,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_FSCHECK (1 << TIF_FSCHECK) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_SVE (1 << TIF_SVE) diff --git a/arch/arm64/include/asm/vdso/clocksource.h b/arch/arm64/include/asm/vdso/clocksource.h index df6ea65c1decad7423bebcb6d4b71dd95ee1579f..b054d9febfb5432c46d21af10af4316134987c58 100644 --- a/arch/arm64/include/asm/vdso/clocksource.h +++ b/arch/arm64/include/asm/vdso/clocksource.h @@ -2,7 +2,10 @@ #ifndef __ASM_VDSOCLOCKSOURCE_H #define __ASM_VDSOCLOCKSOURCE_H -#define VDSO_ARCH_CLOCKMODES \ - VDSO_CLOCKMODE_ARCHTIMER +#define VDSO_ARCH_CLOCKMODES \ + /* vdso clocksource for both 32 and 64bit tasks */ \ + VDSO_CLOCKMODE_ARCHTIMER, \ + /* vdso clocksource for 64bit tasks only */ \ + VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT #endif diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h index b6907ae78e5303bc45804faeb83d3d002d7d28ad..9a625e8947ff0a8233d6a4e5e49228c57ec57e7b 100644 --- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h +++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h @@ -111,7 +111,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode) * update. Return something. Core will do another round and then * see the mode change and fallback to the syscall. */ - if (clock_mode == VDSO_CLOCKMODE_NONE) + if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER) return 0; /* @@ -152,6 +152,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void) return ret; } +static inline bool vdso_clocksource_ok(const struct vdso_data *vd) +{ + return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER; +} +#define vdso_clocksource_ok vdso_clocksource_ok + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index d1757ef1b1e749692958681620382d09a3597ce5..73039949b5ce2f6227f11d0d1591967c38bda8f6 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature) */ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) { - unsigned long replptr; - - if (kernel_text_address(pc)) - return true; - - replptr = (unsigned long)ALT_REPL_PTR(alt); - if (pc >= replptr && pc <= (replptr + alt->alt_len)) - return false; - - /* - * Branching into *another* alternate sequence is doomed, and - * we're not even trying to fix it up. 
- */ - BUG(); + unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); + return !(pc >= replptr && pc <= (replptr + alt->alt_len)); } #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 8e302dc093d09ec5c8a257c53efa76194ffa3ea0..79728bfb5351ffd9cdf258ab9c1c366b0cac3e0f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -782,6 +782,7 @@ static const struct midr_range erratum_1463225[] = { MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1), /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf), + {}, }; #endif diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index 5df49366e9abe0cde9fa26fc632c988d062b2fca..7310a4f7f9931ab904e391740badbf109145859e 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init); /* * Single step API and exception handling. */ -static void set_regs_spsr_ss(struct pt_regs *regs) +static void set_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate |= DBG_SPSR_SS; } -NOKPROBE_SYMBOL(set_regs_spsr_ss); +NOKPROBE_SYMBOL(set_user_regs_spsr_ss); -static void clear_regs_spsr_ss(struct pt_regs *regs) +static void clear_user_regs_spsr_ss(struct user_pt_regs *regs) { regs->pstate &= ~DBG_SPSR_SS; } -NOKPROBE_SYMBOL(clear_regs_spsr_ss); +NOKPROBE_SYMBOL(clear_user_regs_spsr_ss); + +#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs) +#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs) static DEFINE_SPINLOCK(debug_hook_lock); static LIST_HEAD(user_step_hook); @@ -391,17 +394,26 @@ void user_rewind_single_step(struct task_struct *task) * If single step is active for this thread, then set SPSR.SS * to 1 to avoid returning to the active-pending state. */ - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) set_regs_spsr_ss(task_pt_regs(task)); } NOKPROBE_SYMBOL(user_rewind_single_step); void user_fastforward_single_step(struct task_struct *task) { - if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) clear_regs_spsr_ss(task_pt_regs(task)); } +void user_regs_reset_single_step(struct user_pt_regs *regs, + struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_SINGLESTEP)) + set_user_regs_spsr_ss(regs); + else + clear_user_regs_spsr_ss(regs); +} + /* Kernel API */ void kernel_enable_single_step(struct pt_regs *regs) { diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index 3dbdf9752b118fd45784a48a7bdd85c6b25e2f1b..d3be9dbf549007e19e347ca9978694a410d12a4f 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -57,7 +57,7 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr) /* * The CPU masked interrupts, and we are leaving them masked during * do_debug_exception(). Update PMR as if we had called - * local_mask_daif(). + * local_daif_mask(). 
*/ if (system_uses_irq_prio_masking()) gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 5304d193c79dd3a67bca8d72ec9afbf1d530df2e..35de8ba60e3d5801b2539ac4354834cd7538b835 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -126,8 +126,10 @@ alternative_else_nop_endif add \dst, \dst, #(\sym - .entry.tramp.text) .endm - // This macro corrupts x0-x3. It is the caller's duty - // to save/restore them if required. + /* + * This macro corrupts x0-x3. It is the caller's duty to save/restore + * them if required. + */ .macro apply_ssbd, state, tmp1, tmp2 #ifdef CONFIG_ARM64_SSBD alternative_cb arm64_enable_wa2_handling @@ -167,13 +169,28 @@ alternative_cb_end stp x28, x29, [sp, #16 * 14] .if \el == 0 + .if \regsize == 32 + /* + * If we're returning from a 32-bit task on a system affected by + * 1418040 then re-enable userspace access to the virtual counter. + */ +#ifdef CONFIG_ARM64_ERRATUM_1418040 +alternative_if ARM64_WORKAROUND_1418040 + mrs x0, cntkctl_el1 + orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN + msr cntkctl_el1, x0 +alternative_else_nop_endif +#endif + .endif clear_gp_regs mrs x21, sp_el0 ldr_this_cpu tsk, __entry_task, x20 msr sp_el0, tsk - // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions - // when scheduling. + /* + * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions + * when scheduling. + */ ldr x19, [tsk, #TSK_TI_FLAGS] disable_step_tsk x19, x20 @@ -320,6 +337,14 @@ alternative_else_nop_endif tst x22, #PSR_MODE32_BIT // native task? b.eq 3f +#ifdef CONFIG_ARM64_ERRATUM_1418040 +alternative_if ARM64_WORKAROUND_1418040 + mrs x0, cntkctl_el1 + bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN + msr cntkctl_el1, x0 +alternative_else_nop_endif +#endif + #ifdef CONFIG_ARM64_ERRATUM_845719 alternative_if ARM64_WORKAROUND_845719 #ifdef CONFIG_PID_IN_CONTEXTIDR @@ -331,21 +356,6 @@ alternative_if ARM64_WORKAROUND_845719 alternative_else_nop_endif #endif 3: -#ifdef CONFIG_ARM64_ERRATUM_1418040 -alternative_if_not ARM64_WORKAROUND_1418040 - b 4f -alternative_else_nop_endif - /* - * if (x22.mode32 == cntkctl_el1.el0vcten) - * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten - */ - mrs x1, cntkctl_el1 - eon x0, x1, x22, lsr #3 - tbz x0, #1, 4f - eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN - msr cntkctl_el1, x1 -4: -#endif scs_save tsk, x0 /* No kernel C function calls after this as user keys are set. 
*/ @@ -377,11 +387,11 @@ alternative_else_nop_endif .if \el == 0 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - bne 5f + bne 4f msr far_el1, x30 tramp_alias x30, tramp_exit_native br x30 -5: +4: tramp_alias x30, tramp_exit_compat br x30 #endif diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index 43119922341f81272568fc6049f0ec163d26eaa8..1a157ca33262d11de7d74eaf4d994cb9bf3ceeca 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) if (!kgdb_single_step) return DBG_HOOK_ERROR; - kgdb_handle_exception(1, SIGTRAP, 0, regs); + kgdb_handle_exception(0, SIGTRAP, 0, regs); return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_step_brk_fn); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 68b7f34a08f547e6a4b144613fe22f9cc2ba33f1..1e02e98e68dd375e35fb372d40ad9e3d2f196bc3 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1811,19 +1811,42 @@ static void tracehook_report_syscall(struct pt_regs *regs, unsigned long saved_reg; /* - * A scratch register (ip(r12) on AArch32, x7 on AArch64) is - * used to denote syscall entry/exit: + * We have some ABI weirdness here in the way that we handle syscall + * exit stops because we indicate whether or not the stop has been + * signalled from syscall entry or syscall exit by clobbering a general + * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee + * and restoring its old value after the stop. This means that: + * + * - Any writes by the tracer to this register during the stop are + * ignored/discarded. + * + * - The actual value of the register is not available during the stop, + * so the tracer cannot save it and restore it later. + * + * - Syscall stops behave differently to seccomp and pseudo-step traps + * (the latter do not nobble any registers). */ regno = (is_compat_task() ? 12 : 7); saved_reg = regs->regs[regno]; regs->regs[regno] = dir; - if (dir == PTRACE_SYSCALL_EXIT) + if (dir == PTRACE_SYSCALL_ENTER) { + if (tracehook_report_syscall_entry(regs)) + forget_syscall(regs); + regs->regs[regno] = saved_reg; + } else if (!test_thread_flag(TIF_SINGLESTEP)) { tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) - forget_syscall(regs); + regs->regs[regno] = saved_reg; + } else { + regs->regs[regno] = saved_reg; - regs->regs[regno] = saved_reg; + /* + * Signal a pseudo-step exception since we are stepping but + * tracer modifications to the registers may have rewound the + * state machine. + */ + tracehook_report_syscall_exit(regs, 1); + } } int syscall_trace_enter(struct pt_regs *regs) @@ -1833,12 +1856,12 @@ int syscall_trace_enter(struct pt_regs *regs) if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) { tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU)) - return -1; + return NO_SYSCALL; } /* Do the secure computing after ptrace; failures should be fast. 
*/ if (secure_computing() == -1) - return -1; + return NO_SYSCALL; if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, regs->syscallno); @@ -1851,12 +1874,14 @@ int syscall_trace_enter(struct pt_regs *regs) void syscall_trace_exit(struct pt_regs *regs) { + unsigned long flags = READ_ONCE(current_thread_info()->flags); + audit_syscall_exit(regs); - if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) + if (flags & _TIF_SYSCALL_TRACEPOINT) trace_sys_exit(regs, regs_return_value(regs)); - if (test_thread_flag(TIF_SYSCALL_TRACE)) + if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP)) tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); rseq_syscall(regs); @@ -1934,8 +1959,8 @@ static int valid_native_regs(struct user_pt_regs *regs) */ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task) { - if (!test_tsk_thread_flag(task, TIF_SINGLESTEP)) - regs->pstate &= ~DBG_SPSR_SS; + /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */ + user_regs_reset_single_step(regs, task); if (is_compat_thread(task_thread_info(task))) return valid_compat_regs(regs); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 801d56cdf70176530b6ce95e444714c362a3500b..3b4f31f35e4585a6b27d8d139a4bd3e671fdc69d 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -800,7 +800,6 @@ static void setup_restart_syscall(struct pt_regs *regs) */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { - struct task_struct *tsk = current; sigset_t *oldset = sigmask_to_save(); int usig = ksig->sig; int ret; @@ -824,14 +823,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) */ ret |= !valid_user_regs(®s->user_regs, current); - /* - * Fast forward the stepping logic so we step into the signal - * handler. - */ - if (!ret) - user_fastforward_single_step(tsk); - - signal_setup_done(ret, ksig, 0); + /* Step into the signal handler if we are stepping */ + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); } /* diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c index 5f5b868292f52233f7d2b7f21f1f3d8d1ac5bdb6..5f0c04863d2c19eaecf2455b9db38eda3ef53eb5 100644 --- a/arch/arm64/kernel/syscall.c +++ b/arch/arm64/kernel/syscall.c @@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno, ret = do_ni_syscall(regs, scno); } + if (is_compat_task()) + ret = lower_32_bits(ret); + regs->regs[0] = ret; } @@ -121,7 +124,21 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, user_exit(); if (has_syscall_work(flags)) { - /* set default errno for user-issued syscall(-1) */ + /* + * The de-facto standard way to skip a system call using ptrace + * is to set the system call to -1 (NO_SYSCALL) and set x0 to a + * suitable error code for consumption by userspace. However, + * this cannot be distinguished from a user-issued syscall(-1) + * and so we must set x0 to -ENOSYS here in case the tracer doesn't + * issue the skip and we fall into trace_exit with x0 preserved. + * + * This is slightly odd because it also means that if a tracer + * sets the system call number to -1 but does not initialise x0, + * then x0 will be preserved for all system calls apart from a + * user-issued syscall(-1). However, requesting a skip and not + * setting the return value is unlikely to do anything sensible + * anyway. 
+ */ if (scno == NO_SYSCALL) regs->regs[0] = -ENOSYS; scno = syscall_trace_enter(regs); @@ -139,7 +156,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) { local_daif_mask(); flags = current_thread_info()->flags; - if (!has_syscall_work(flags)) { + if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) { /* * We're off to userspace, where interrupts are * always enabled after we restore the flags from diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index d88148bef6b0d9503171f7cc31d7dd15f19fc542..5139a5f19256822f74fc7ed29b558c6f71a7e841 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit)) COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..) CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) -CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR) +CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT)) CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments ifneq ($(COMPAT_GCC_TOOLCHAIN),) CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 8c0035cab6b629fa5732fe2e872256da1dd279f8..31058e6e7c2a3a42445df11705c66f2b664b1da9 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, return true; } -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz) { pud_t *pudp; pmd_t *pmdp; @@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr) return false; if (pudp) - return kvm_s2pud_exec(pudp); + return sz <= PUD_SIZE && kvm_s2pud_exec(pudp); else if (pmdp) - return kvm_s2pmd_exec(pmdp); + return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp); else - return kvm_s2pte_exec(ptep); + return sz == PAGE_SIZE && kvm_s2pte_exec(ptep); } static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, @@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * execute permissions, and we preserve whatever we have. */ needs_exec = exec_fault || - (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa)); + (fault_status == FSC_PERM && + stage2_is_exec(kvm, fault_ipa, vma_pagesize)); if (vma_pagesize == PUD_SIZE) { pud_t new_pud = kvm_pfn_pud(pfn, mem_type); diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h index c628747d4ecd1895e9cbde95bbbf4179a0cea573..7dd4a80e05d6ddef8d132d09acad6251210c92cf 100644 --- a/arch/mips/include/asm/unroll.h +++ b/arch/mips/include/asm/unroll.h @@ -19,15 +19,13 @@ \ /* \ * We can't unroll if the number of iterations isn't \ - * compile-time constant. Unfortunately GCC versions \ - * up until 4.6 tend to miss obvious constants & cause \ + * compile-time constant. Unfortunately clang versions \ + * up until 8.0 tend to miss obvious constants & cause \ * this check to fail, even though they go on to \ * generate reasonable code for the switch statement, \ * so we skip the sanity check for those compilers. 
\ */ \ - BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 || \ - CONFIG_CLANG_VERSION >= 80000) && \ - !__builtin_constant_p(times)); \ + BUILD_BUG_ON(!__builtin_constant_p(times)); \ \ switch (times) { \ case 32: fn(__VA_ARGS__); /* fall through */ \ diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 5ae82d925197102b240ed9970b02a3413d3a6914..d242300cacc04371038bf59d954ccc026a59c3dc 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1722,6 +1722,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, vcpu->arch.gprs[rt], *(u32 *)data); break; +#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) case sdl_op: run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( vcpu->arch.host_cp0_badvaddr) & (~0x7); @@ -1815,6 +1816,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], *(u64 *)data); break; +#endif #ifdef CONFIG_CPU_LOONGSON64 case sdc2_op: @@ -2002,6 +2004,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, } break; +#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) case ldl_op: run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( vcpu->arch.host_cp0_badvaddr) & (~0x7); @@ -2073,6 +2076,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, break; } break; +#endif #ifdef CONFIG_CPU_LOONGSON64 case ldc2_op: diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c index 3b2552fb773513eff628a4657af79a0e7eb575d5..5958217861b862e45cc1dfe80bf34c33877f60e0 100644 --- a/arch/mips/pci/pci-xtalk-bridge.c +++ b/arch/mips/pci/pci-xtalk-bridge.c @@ -627,9 +627,10 @@ static int bridge_probe(struct platform_device *pdev) return -ENOMEM; domain = irq_domain_create_hierarchy(parent, 0, 8, fn, &bridge_domain_ops, NULL); - irq_domain_free_fwnode(fn); - if (!domain) + if (!domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } pci_set_flags(PCI_PROBE_ONLY); diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 118953d417634369047dc71bb2db3a400b7719fd..6dd4171c95305c967f7b3dc8aaabc776369f2fa0 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i) _atomic_spin_unlock_irqrestore(v, flags); } +#define atomic64_set_release(v, i) atomic64_set((v), (i)) + static __inline__ s64 atomic64_read(const atomic64_t *v) { diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index ab5c215cf46c3d81a5ef840fe46ffe740174c7ec..068958575871781355486cafe95bdba8c76b8658 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 
70ffbcf889b8e34f6e4195e98d2c494f190693a9..2e4d1f05a92646b36b7d4eaabffaad6637bac430 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; } + +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) +{ + unsigned long flags; + u8 prev; + + _atomic_spin_lock_irqsave(ptr, flags); + if ((prev = *ptr) == old) + *ptr = new; + _atomic_spin_unlock_irqrestore(ptr, flags); + return prev; +} diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h index 965b1f39b2a5f6dccc38ef299a0cd17fd9b51c42..b0c70a35fd0edbd96b14ff7e320a1b357ec5d410 100644 --- a/arch/powerpc/include/asm/icswx.h +++ b/arch/powerpc/include/asm/icswx.h @@ -77,6 +77,8 @@ struct coprocessor_completion_block { #define CSB_CC_CHAIN (37) #define CSB_CC_SEQUENCE (38) #define CSB_CC_HW (39) +/* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */ +#define CSB_CC_FAULT_ADDRESS (250) #define CSB_SIZE (0x10) #define CSB_ALIGN CSB_SIZE diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index fa080694e581edcf96f9a2c21487cf179a8d3680..446e54c3f71e213f9b895ec29ee5c87b06ebe178 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -2551,7 +2551,7 @@ EXC_VIRT_NONE(0x5400, 0x100) INT_DEFINE_BEGIN(denorm_exception) IVEC=0x1500 IHSRR=1 - IBRANCH_COMMON=0 + IBRANCH_TO_COMMON=0 IKVM_REAL=1 INT_DEFINE_END(denorm_exception) @@ -3072,10 +3072,18 @@ do_hash_page: ori r0,r0,DSISR_BAD_FAULT_64S@l and. r0,r5,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ + + /* + * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then + * don't call hash_page, just fail the fault. This is required to + * prevent re-entrancy problems in the hash code, namely perf + * interrupts hitting while something holds H_PAGE_BUSY, and taking a + * hash fault. See the comment in hash_preload(). + */ ld r11, PACA_THREAD_INFO(r13) - lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ - andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ - bne 77f /* then don't call hash_page now */ + lwz r0,TI_PREEMPT(r11) + andis. r0,r0,NMI_MASK@h + bne 77f /* * r3 contains the trap number diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 2168372b792d708f4186f4d8a9cf1b8c1ad58a19..74da65aacbc96de91b166f123e6f39cf3acf6bd5 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -87,7 +87,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align, * This is very early in boot, so no harm done if the kernel crashes at * this point. */ - BUG_ON(shared_lppaca_size >= shared_lppaca_total_size); + BUG_ON(shared_lppaca_size > shared_lppaca_total_size); return ptr; } diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 468169e33c86fad2e80f326bcc7d0e45aa1525e9..9b9f92ad0e7abef0343232064264365a38a267e8 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, pgd_t *pgdir; int rc, ssize, update_flags = 0; unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? 
_PAGE_EXEC : 0); + unsigned long flags; BUG_ON(get_region_id(ea) != USER_REGION_ID); @@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, return; #endif /* CONFIG_PPC_64K_PAGES */ + /* + * __hash_page_* must run with interrupts off, as it sets the + * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any + * time and may take a hash fault reading the user stack, see + * read_user_stack_slow() in the powerpc/perf code. + * + * If that takes a hash fault on the same page as we lock here, it + * will bail out when seeing H_PAGE_BUSY set, and retry the access + * leading to an infinite loop. + * + * Disabling interrupts here does not prevent perf interrupts, but it + * will prevent them taking hash faults (see the NMI test in + * do_hash_page), then read_user_stack's copy_from_user_nofault will + * fail and perf will fall back to read_user_stack_slow(), which + * walks the Linux page tables. + * + * Interrupts must also be off for the duration of the + * mm_is_thread_local test and update, to prevent preempt running the + * mm on another CPU (XXX: this may be racy vs kthread_use_mm). + */ + local_irq_save(flags); + /* Is that local to this CPU ? */ if (mm_is_thread_local(mm)) update_flags |= HPTE_LOCAL_UPDATE; @@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, mm_ctx_user_psize(&mm->context), mm_ctx_user_psize(&mm->context), pte_val(*ptep)); + + local_irq_restore(flags); } /* diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index ca5fcb4bff32604cd32be9a9086ec88d37d4304f..d174106bab67629618b3d1ab8ad5945e3269310c 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -354,12 +354,14 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute) u64 amr; pkey_shift = pkeyshift(pkey); - if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift))) - return true; + if (execute) + return !(read_iamr() & (IAMR_EX_BIT << pkey_shift)); + + amr = read_amr(); + if (write) + return !(amr & (AMR_WR_BIT << pkey_shift)); - amr = read_amr(); /* Delay reading amr until absolutely needed */ - return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) || - (write && !(amr & (AMR_WR_BIT << pkey_shift)))); + return !(amr & (AMR_RD_BIT << pkey_shift)); } bool arch_pte_access_permitted(u64 pte, bool write, bool execute) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index cd6a742ac6ef5d5f7a7a175cdfe7a66cd7cf04ee..01d70280d287215bae6416b3f75bdbaaba7abefe 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs) perf_read_regs(regs); + /* + * If perf interrupts hit in a local_irq_disable (soft-masked) region, + * we consider them as NMIs. This is required to prevent hash faults on + * user addresses when reading callchains. See the NMI test in + * do_hash_page. 
+ */ nmi = perf_intr_is_nmi(regs); if (nmi) nmi_enter(); diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c index 266a6ca5e15e678e8f042f3d30787fdb0e2aead1..3d21fce254b741c3b7bb52edcea631108d42aa9c 100644 --- a/arch/powerpc/platforms/powernv/vas-fault.c +++ b/arch/powerpc/platforms/powernv/vas-fault.c @@ -79,7 +79,7 @@ static void update_csb(struct vas_window *window, csb_addr = (void __user *)be64_to_cpu(crb->csb_addr); memset(&csb, 0, sizeof(csb)); - csb.cc = CSB_CC_TRANSLATION; + csb.cc = CSB_CC_FAULT_ADDRESS; csb.ce = CSB_CE_TERMINATION; csb.cs = 0; csb.count = 0; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 128192e14ff2a83e6b29490bf4b6d78feaabd660..3230c1d48562662575ca20926bc1bd7b0c1102f3 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -23,6 +23,8 @@ config RISCV select ARCH_HAS_SET_DIRECT_MAP select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX if MMU + select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if 64BIT diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 3f1737f301ccb6631bb43479578f86f23f1e157a..d0e24aaa2aa060499d338cbeae06a7e99352c7e5 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -58,8 +58,16 @@ do { \ * The AQ/RL pair provides a RCpc critical section, but there's not really any * way we can take advantage of that here because the ordering is only enforced * on that one lock. Thus, we're just doing a full fence. + * + * Since we allow writeX to be called from preemptive regions we need at least + * an "o" in the predecessor set to ensure device writes are visible before the + * task is marked as available for scheduling on a new hart. While I don't see + * any concrete reason we need a full IO fence, it seems safer to just upgrade + * this in order to avoid any IO crossing a scheduling boundary. In both + * instances the scheduler pairs this with an mb(), so nothing is necessary on + * the new hart. 
*/ -#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw) +#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) #include diff --git a/arch/riscv/include/asm/gdb_xml.h b/arch/riscv/include/asm/gdb_xml.h index 041b45f5b99719f9430122250094312277eae380..09342111f22771c091309ac712f15a0a8374bfad 100644 --- a/arch/riscv/include/asm/gdb_xml.h +++ b/arch/riscv/include/asm/gdb_xml.h @@ -3,8 +3,7 @@ #ifndef __ASM_GDB_XML_H_ #define __ASM_GDB_XML_H_ -#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature -static const char riscv_gdb_stub_feature[64] = +const char riscv_gdb_stub_feature[64] = "PacketSize=800;qXfer:features:read+;"; static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:"; diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h index 8177a457caffa53a0dbb545718ccfbe971f5f495..46677daf708bd0b06a763a8297dfa88f369712d0 100644 --- a/arch/riscv/include/asm/kgdb.h +++ b/arch/riscv/include/asm/kgdb.h @@ -19,7 +19,6 @@ #ifndef __ASSEMBLY__ -extern int kgdb_has_hit_break(unsigned long addr); extern unsigned long kgdb_compiled_break; static inline void arch_kgdb_breakpoint(void) @@ -106,7 +105,9 @@ static inline void arch_kgdb_breakpoint(void) #define DBG_REG_BADADDR_OFF 34 #define DBG_REG_CAUSE_OFF 35 -#include +extern const char riscv_gdb_stub_feature[64]; + +#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature #endif #endif diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 1dd12a0cbb2b0b9b69796958d1fd8fe22244f7bc..464a2bbc97ea334db9ac64c09b65f8ca1f54550c 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -12,7 +12,11 @@ #include /* thread information allocation */ +#ifdef CONFIG_64BIT +#define THREAD_SIZE_ORDER (2) +#else #define THREAD_SIZE_ORDER (1) +#endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #ifndef __ASSEMBLY__ diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c index c3275f42d1ac815104c112ebfc6dfd3630716d53..963ed7edcff264e6dcfc4891a77bb774c0655d26 100644 --- a/arch/riscv/kernel/kgdb.c +++ b/arch/riscv/kernel/kgdb.c @@ -44,18 +44,18 @@ DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ) DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ) DECLARE_INSN(sret, MATCH_SRET, MASK_SRET) -int decode_register_index(unsigned long opcode, int offset) +static int decode_register_index(unsigned long opcode, int offset) { return (opcode >> offset) & 0x1F; } -int decode_register_index_short(unsigned long opcode, int offset) +static int decode_register_index_short(unsigned long opcode, int offset) { return ((opcode >> offset) & 0x7) + 8; } /* Calculate the new address for after a step */ -int get_step_address(struct pt_regs *regs, unsigned long *next_addr) +static int get_step_address(struct pt_regs *regs, unsigned long *next_addr) { unsigned long pc = regs->epc; unsigned long *regs_ptr = (unsigned long *)regs; @@ -136,7 +136,7 @@ int get_step_address(struct pt_regs *regs, unsigned long *next_addr) return 0; } -int do_single_step(struct pt_regs *regs) +static int do_single_step(struct pt_regs *regs) { /* Determine where the target instruction will send us to */ unsigned long addr = 0; @@ -320,7 +320,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, return err; } -int kgdb_riscv_kgdbbreak(unsigned long addr) +static int kgdb_riscv_kgdbbreak(unsigned long addr) { if (stepped_address == addr) return KGDB_SW_SINGLE_STEP; diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 
f4adb3684f3db8787530fef571a89e258539c957..79e9d55bdf1ac41a14e52e17ae06e94cc05f6259 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -95,19 +95,40 @@ void __init mem_init(void) #ifdef CONFIG_BLK_DEV_INITRD static void __init setup_initrd(void) { + phys_addr_t start; unsigned long size; - if (initrd_start >= initrd_end) { - pr_info("initrd not found or empty"); + /* Ignore the virtul address computed during device tree parsing */ + initrd_start = initrd_end = 0; + + if (!phys_initrd_size) + return; + /* + * Round the memory region to page boundaries as per free_initrd_mem() + * This allows us to detect whether the pages overlapping the initrd + * are in use, but more importantly, reserves the entire set of pages + * as we don't want these pages allocated for other purposes. + */ + start = round_down(phys_initrd_start, PAGE_SIZE); + size = phys_initrd_size + (phys_initrd_start - start); + size = round_up(size, PAGE_SIZE); + + if (!memblock_is_region_memory(start, size)) { + pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region", + (u64)start, size); goto disable; } - if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) { - pr_err("initrd extends beyond end of memory"); + + if (memblock_is_region_reserved(start, size)) { + pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n", + (u64)start, size); goto disable; } - size = initrd_end - initrd_start; - memblock_reserve(__pa_symbol(initrd_start), size); + memblock_reserve(start, size); + /* Now convert initrd to virtual addresses */ + initrd_start = (unsigned long)__va(phys_initrd_start); + initrd_end = initrd_start + phys_initrd_size; initrd_below_start_ok = 1; pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", @@ -126,33 +147,36 @@ void __init setup_bootmem(void) { struct memblock_region *reg; phys_addr_t mem_size = 0; + phys_addr_t total_mem = 0; + phys_addr_t mem_start, end = 0; phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); /* Find the memory region containing the kernel */ for_each_memblock(memory, reg) { - phys_addr_t end = reg->base + reg->size; - - if (reg->base <= vmlinux_start && vmlinux_end <= end) { - mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); - - /* - * Remove memblock from the end of usable area to the - * end of region - */ - if (reg->base + mem_size < end) - memblock_remove(reg->base + mem_size, - end - reg->base - mem_size); - } + end = reg->base + reg->size; + if (!total_mem) + mem_start = reg->base; + if (reg->base <= vmlinux_start && vmlinux_end <= end) + BUG_ON(reg->size == 0); + total_mem = total_mem + reg->size; } - BUG_ON(mem_size == 0); + + /* + * Remove memblock from the end of usable area to the + * end of region + */ + mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET); + if (mem_start + mem_size < end) + memblock_remove(mem_start + mem_size, + end - mem_start - mem_size); /* Reserve from the start of the kernel to the end of the kernel */ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); - set_max_mapnr(PFN_DOWN(mem_size)); max_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_low_pfn = max_pfn; + set_max_mapnr(max_low_pfn); #ifdef CONFIG_BLK_DEV_INITRD setup_initrd(); diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 4a8b61806633d6c24d091332020cbe3d1550ad83..87b4ab3d3c77c3828f8f39280ef35edfeb40d662 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void) (__pa(((uintptr_t) kasan_early_shadow_pmd))), 
__pgprot(_PAGE_TABLE))); - flush_tlb_all(); + local_flush_tlb_all(); } static void __init populate(void *start, void *end) @@ -79,7 +79,7 @@ static void __init populate(void *start, void *end) pfn_pgd(PFN_DOWN(__pa(&pmd[offset])), __pgprot(_PAGE_TABLE))); - flush_tlb_all(); + local_flush_tlb_all(); memset(start, 0, end - start); } diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 1e3df52b2b653a81013ed13cddb973c3323f452e..37265f551a119c248e62bef88927887165a3a6d1 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5); CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7); CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc); CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); -CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109); +CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); @@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = { CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS), CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES), CPUMF_EVENT_PTR(cf_z15, DFLT_CC), - CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR), + CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH), CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE), CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE), NULL, diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 82df06d720e8c254e4134489b045a34b5485fe18..3b5a4d25ca9b5e069975516050f0517543157ac4 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste) _PAGE_YOUNG); #ifdef CONFIG_MEM_SOFT_DIRTY pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, - _PAGE_DIRTY); + _PAGE_SOFT_DIRTY); #endif pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC); diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 22d968bfe9bb689af8c9034ae458a6d83bf8cb7f..d770da3f8b6fb2adcc43f6e3bda134b5baa4ef68 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd); extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address); extern void pmd_free(struct mm_struct *mm, pmd_t *pmd); +#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp)) #endif static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -33,13 +34,4 @@ do { \ tlb_remove_page((tlb), (pte)); \ } while (0) -#if CONFIG_PGTABLE_LEVELS > 2 -#define __pmd_free_tlb(tlb, pmdp, addr) \ -do { \ - struct page *page = virt_to_page(pmdp); \ - pgtable_pmd_page_dtor(page); \ - tlb_remove_page((tlb), page); \ -} while (0); -#endif - #endif /* __ASM_SH_PGALLOC_H */ diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 956a7a03b0c838e06b68a41c968dfa7ca7a4b3ba..9bac5bbb67f33d7e81dd48fd2950cd4a1783943f 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -199,7 +199,7 @@ syscall_trace_entry: mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! - mov.l 2f, r10 ! Number of syscalls + mov.l 6f, r10 ! Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 @@ -353,7 +353,7 @@ ENTRY(system_call) tst r9, r8 bf syscall_trace_entry ! - mov.l 2f, r8 ! 
Number of syscalls + mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! @@ -392,7 +392,7 @@ syscall_exit: #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif -2: .long NR_syscalls +6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 7619742f91c9a9c98c285de38abd2891cd8c6ebc..5a828fde7a42fecf3804b3a8358d8e479e3bed41 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -90,8 +90,8 @@ endif vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o -vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o +efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a # The compressed kernel is built with -fPIC/-fPIE so that a boot loader # can place it anywhere in memory and it will still run. However, since @@ -115,7 +115,7 @@ endef quiet_cmd_check-and-link-vmlinux = LD $@ cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld) -$(obj)/vmlinux: $(vmlinux-objs-y) FORCE +$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE $(call if_changed,check-and-link-vmlinux) OBJCOPYFLAGS_vmlinux.bin := -R .comment -S diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile index b7a5790d8d63e4df84f0e553377b3dcb5ba70499..08bf95dbc91126d25fc93cdc55526f6cbb064977 100644 --- a/arch/x86/entry/Makefile +++ b/arch/x86/entry/Makefile @@ -7,12 +7,20 @@ KASAN_SANITIZE := n UBSAN_SANITIZE := n KCOV_INSTRUMENT := n -CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong -CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong -CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong +CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_syscall_x32.o = $(CC_FLAGS_FTRACE) + +CFLAGS_common.o += -fno-stack-protector +CFLAGS_syscall_64.o += -fno-stack-protector +CFLAGS_syscall_32.o += -fno-stack-protector +CFLAGS_syscall_x32.o += -fno-stack-protector CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,) CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,) +CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,) + obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o obj-y += common.o diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index e83b3f14897cc43ba1ea31a820b5014197ad7adf..f09288431f289c16c5848e6735bb0d2e1d3f07c7 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -46,7 +46,7 @@ #include /* Check that the stack and regs on entry from user mode are sane. 
*/ -static void check_user_regs(struct pt_regs *regs) +static noinstr void check_user_regs(struct pt_regs *regs) { if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) { /* @@ -294,7 +294,7 @@ static void __prepare_exit_to_usermode(struct pt_regs *regs) #endif } -__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs) +static noinstr void prepare_exit_to_usermode(struct pt_regs *regs) { instrumentation_begin(); __prepare_exit_to_usermode(regs); diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index eeac6dc2adaa3bcd1775dfc8bebdf70ea12b3d8b..80d3b30d3ee3e0237eb99ae567e92f4642b25deb 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -469,16 +469,15 @@ __visible noinstr void func(struct pt_regs *regs, \ .align 8 SYM_CODE_START(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR - pos = . .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) UNWIND_HINT_IRET_REGS +0 : .byte 0x6a, vector jmp asm_common_interrupt nop /* Ensure that the above is 8 bytes max */ - . = pos + 8 - pos=pos+8 - vector=vector+1 + . = 0b + 8 + vector = vector+1 .endr SYM_CODE_END(irq_entries_start) @@ -486,16 +485,15 @@ SYM_CODE_END(irq_entries_start) .align 8 SYM_CODE_START(spurious_entries_start) vector=FIRST_SYSTEM_VECTOR - pos = . .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) UNWIND_HINT_IRET_REGS +0 : .byte 0x6a, vector jmp asm_spurious_interrupt nop /* Ensure that the above is 8 bytes max */ - . = pos + 8 - pos=pos+8 - vector=vector+1 + . = 0b + 8 + vector = vector+1 .endr SYM_CODE_END(spurious_entries_start) #endif @@ -553,7 +551,7 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check); /* NMI */ DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi); -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64) DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi); #endif @@ -563,7 +561,7 @@ DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug); #else DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug); #endif -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64) DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug); #endif @@ -626,8 +624,8 @@ DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested #if IS_ENABLED(CONFIG_HYPERV) DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback); -DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); -DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_STIMER0_VECTOR, sysvec_hyperv_stimer0); +DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment); +DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0); #endif #if IS_ENABLED(CONFIG_ACRN_GUEST) diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h index ac1a99ffbd8d263e4dc2e8a6cba3b589a9913d58..7f080f5c7def74d1bf8a0bafec7ab413aef0a3e7 100644 --- a/arch/x86/include/asm/io_bitmap.h +++ b/arch/x86/include/asm/io_bitmap.h @@ -19,12 +19,28 @@ struct task_struct; void io_bitmap_share(struct task_struct *tsk); void io_bitmap_exit(struct task_struct *tsk); +static inline void native_tss_invalidate_io_bitmap(void) +{ + /* + * Invalidate the I/O bitmap by moving io_bitmap_base outside the + * TSS limit so any subsequent I/O access from user space will + * trigger a #GP. + * + * This is correct even when VMEXIT rewrites the TSS limit + * to 0x67 as the only requirement is that the base points + * outside the limit. 
+ */ + this_cpu_write(cpu_tss_rw.x86_tss.io_bitmap_base, + IO_BITMAP_OFFSET_INVALID); +} + void native_tss_update_io_bitmap(void); #ifdef CONFIG_PARAVIRT_XXL #include #else #define tss_update_io_bitmap native_tss_update_io_bitmap +#define tss_invalidate_io_bitmap native_tss_invalidate_io_bitmap #endif #else diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h index 5270ff39b9afec2d83385a6eeae2d4777594dde9..a1911fea87392ec5d7148ff9fd76dbfc57b82138 100644 --- a/arch/x86/include/asm/iosf_mbi.h +++ b/arch/x86/include/asm/iosf_mbi.h @@ -39,6 +39,7 @@ #define BT_MBI_UNIT_PMC 0x04 #define BT_MBI_UNIT_GFX 0x06 #define BT_MBI_UNIT_SMI 0x0C +#define BT_MBI_UNIT_CCK 0x14 #define BT_MBI_UNIT_USB 0x43 #define BT_MBI_UNIT_SATA 0xA3 #define BT_MBI_UNIT_PCIE 0xA6 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5ca5d297df75eeb8ea7234b9896003f082d78241..3d2afecde50c68b5896c7ec30b01d6cfa6f93eb7 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -302,6 +302,11 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g) } #ifdef CONFIG_X86_IOPL_IOPERM +static inline void tss_invalidate_io_bitmap(void) +{ + PVOP_VCALL0(cpu.invalidate_io_bitmap); +} + static inline void tss_update_io_bitmap(void) { PVOP_VCALL0(cpu.update_io_bitmap); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 732f62e04ddb851f47248013fac01bd62633aaf7..8dfcb2508e6d82cbb163c8fe2c452f042352b95c 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -141,6 +141,7 @@ struct pv_cpu_ops { void (*load_sp0)(unsigned long sp0); #ifdef CONFIG_X86_IOPL_IOPERM + void (*invalidate_io_bitmap)(void); void (*update_io_bitmap)(void); #endif diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 17c5a038f42d3978d1b06d7cec5f8c8afd92eaaf..0780f97c185088ce214bf7f57d917fda7aff730b 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data { }; struct kvm_vmx_nested_state_hdr { - __u32 flags; __u64 vmxon_pa; __u64 vmcs12_pa; - __u64 preemption_timer_deadline; struct { __u16 flags; } smm; + + __u32 flags; + __u64 preemption_timer_deadline; }; struct kvm_svm_nested_state_data { diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index ce61e3e7d39944eef4866a0f50bac28b2a25cf42..81ffcfbfaef2b2da2d7fd4175dad6fe88a534cd7 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2316,12 +2316,12 @@ static int mp_irqdomain_create(int ioapic) ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops, (void *)(long)ioapic); - /* Release fw handle if it was allocated above */ - if (!cfg->dev) - irq_domain_free_fwnode(fn); - - if (!ip->irqdomain) + if (!ip->irqdomain) { + /* Release fw handle if it was allocated above */ + if (!cfg->dev) + irq_domain_free_fwnode(fn); return -ENOMEM; + } ip->irqdomain->parent = parent; diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 5cbaca58af950e417618279010645aaf2868086f..c2b2911feeefebe32e56a79cf331c9e952667f90 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -263,12 +263,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent) msi_default_domain = pci_msi_create_irq_domain(fn, &pci_msi_domain_info, parent); - irq_domain_free_fwnode(fn); } - if (!msi_default_domain) + if (!msi_default_domain) { + 
irq_domain_free_fwnode(fn); pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); - else + } else { msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK; + } } #ifdef CONFIG_IRQ_REMAP @@ -301,7 +302,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent, if (!fn) return NULL; d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) + irq_domain_free_fwnode(fn); return d; } #endif @@ -364,7 +366,8 @@ static struct irq_domain *dmar_get_irq_domain(void) if (fn) { dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info, x86_vector_domain); - irq_domain_free_fwnode(fn); + if (!dmar_domain) + irq_domain_free_fwnode(fn); } out: mutex_unlock(&dmar_lock); @@ -489,7 +492,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id) } d = msi_create_irq_domain(fn, domain_info, parent); - irq_domain_free_fwnode(fn); + if (!d) { + irq_domain_free_fwnode(fn); + kfree(domain_info); + } return d; } diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index c48be6e1f6764ca8488f42a112c9ce58ed2c043a..7649da2478d8af00ff347c9169add6cab4691eb0 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -446,12 +446,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, trace_vector_activate(irqd->irq, apicd->is_managed, apicd->can_reserve, reserve); - /* Nothing to do for fixed assigned vectors */ - if (!apicd->can_reserve && !apicd->is_managed) - return 0; - raw_spin_lock_irqsave(&vector_lock, flags); - if (reserve || irqd_is_managed_and_shutdown(irqd)) + if (!apicd->can_reserve && !apicd->is_managed) + assign_irq_vector_any_locked(irqd); + else if (reserve || irqd_is_managed_and_shutdown(irqd)) vector_assign_managed_shutdown(irqd); else if (apicd->is_managed) ret = activate_managed(irqd); @@ -709,7 +707,6 @@ int __init arch_early_irq_init(void) x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops, NULL); BUG_ON(x86_vector_domain == NULL); - irq_domain_free_fwnode(fn); irq_set_default_host(x86_vector_domain); arch_init_msi_domain(x86_vector_domain); @@ -775,20 +772,10 @@ void lapic_offline(void) static int apic_set_affinity(struct irq_data *irqd, const struct cpumask *dest, bool force) { - struct apic_chip_data *apicd = apic_chip_data(irqd); int err; - /* - * Core code can call here for inactive interrupts. For inactive - * interrupts which use managed or reservation mode there is no - * point in going through the vector assignment right now as the - * activation will assign a vector which fits the destination - * cpumask. Let the core code store the destination mask and be - * done with it. - */ - if (!irqd_is_activated(irqd) && - (apicd->is_managed || apicd->can_reserve)) - return IRQ_SET_MASK_OK; + if (WARN_ON_ONCE(!irqd_is_activated(irqd))) + return -EIO; raw_spin_lock(&vector_lock); cpumask_and(vector_searchmask, dest, cpu_online_mask); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b037cfa7c0c52ff98ddee8e4ea7ce9ae68800928..7401cc12c3ccf465d48cefc4b855e0f33e4582c7 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable, printk("%s %s%pB\n", log_lvl, reliable ? "" : "? 
", (void *)address); } +static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src, + unsigned int nbytes) +{ + if (!user_mode(regs)) + return copy_from_kernel_nofault(buf, (u8 *)src, nbytes); + + /* + * Make sure userspace isn't trying to trick us into dumping kernel + * memory by pointing the userspace instruction pointer at it. + */ + if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX)) + return -EINVAL; + + return copy_from_user_nmi(buf, (void __user *)src, nbytes); +} + /* * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus: * @@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl) #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) u8 opcodes[OPCODE_BUFSIZE]; unsigned long prologue = regs->ip - PROLOGUE_SIZE; - bool bad_ip; - - /* - * Make sure userspace isn't trying to trick us into dumping kernel - * memory by pointing the userspace instruction pointer at it. - */ - bad_ip = user_mode(regs) && - __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX); - if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue, - OPCODE_BUFSIZE)) { + if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) { printk("%sCode: Bad RIP value.\n", loglvl); } else { printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index bda2e5eaca0e0c5eb618f0a48ba3b064eb1fc412..ad3a2b37927d9792028856ba92453f48a97187da 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1074,7 +1074,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of copy_part(offsetof(struct fxregs_state, st_space), 128, &xsave->i387.st_space, &kbuf, &offset_start, &count); if (header.xfeatures & XFEATURE_MASK_SSE) - copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256, + copy_part(xstate_offsets[XFEATURE_SSE], 256, &xsave->i387.xmm_space, &kbuf, &offset_start, &count); /* * Fill xsave->i387.sw_reserved value for ptrace frame: diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index f3c76252247d6ec158716021ba296b6700a32ac4..282b4ee1339f87ce315c6c766a84689b2af394c6 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -207,7 +207,7 @@ static void mask_and_ack_8259A(struct irq_data *data) * lets ACK and report it. 
[once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { - printk(KERN_DEBUG + printk_deferred(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 674a7d66d960b02d406e1dea5f4f2f836730b9c9..de2138ba38e5d8579e75b25e4d2c6e04b67eaa71 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -324,7 +324,8 @@ struct paravirt_patch_template pv_ops = { .cpu.swapgs = native_swapgs, #ifdef CONFIG_X86_IOPL_IOPERM - .cpu.update_io_bitmap = native_tss_update_io_bitmap, + .cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap, + .cpu.update_io_bitmap = native_tss_update_io_bitmap, #endif .cpu.start_context_switch = paravirt_nop, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index f362ce0d5ac0f621b901c6d728445efe7f66c107..fe67dbd76e5179911dfae10bdb2ecb2d11e6e8a1 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -322,20 +322,6 @@ void arch_setup_new_exec(void) } #ifdef CONFIG_X86_IOPL_IOPERM -static inline void tss_invalidate_io_bitmap(struct tss_struct *tss) -{ - /* - * Invalidate the I/O bitmap by moving io_bitmap_base outside the - * TSS limit so any subsequent I/O access from user space will - * trigger a #GP. - * - * This is correct even when VMEXIT rewrites the TSS limit - * to 0x67 as the only requirement is that the base points - * outside the limit. - */ - tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID; -} - static inline void switch_to_bitmap(unsigned long tifp) { /* @@ -346,7 +332,7 @@ static inline void switch_to_bitmap(unsigned long tifp) * user mode. */ if (tifp & _TIF_IO_BITMAP) - tss_invalidate_io_bitmap(this_cpu_ptr(&cpu_tss_rw)); + tss_invalidate_io_bitmap(); } static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm) @@ -380,7 +366,7 @@ void native_tss_update_io_bitmap(void) u16 *base = &tss->x86_tss.io_bitmap_base; if (!test_thread_flag(TIF_IO_BITMAP)) { - tss_invalidate_io_bitmap(tss); + native_tss_invalidate_io_bitmap(); return; } diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 6ad43fc44556e299442a0180b90cd82d83b166d4..2fd698e28e4d5d23629fbcdbbd2ce40b89c9baef 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, * or a page fault), which can make frame pointers * unreliable. */ - if (IS_ENABLED(CONFIG_FRAME_POINTER)) return -EINVAL; } @@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, if (unwind_error(&state)) return -EINVAL; - /* Success path for non-user tasks, i.e. 
kthreads and idle tasks */ - if (!(task->flags & (PF_KTHREAD | PF_IDLE))) - return -EINVAL; - return 0; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index b038695f36c5e44e046b9897a3d515d5bc5f86c7..b7cb3e0716f7d4638497a19da34c721c969a44e1 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -303,6 +303,8 @@ DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check) do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs, error_code, BUS_ADRALN, NULL); + + local_irq_disable(); } #ifdef CONFIG_VMAP_STACK diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 7f969b2d240fd9a59a02eea197237dad70d8342b..ec88bbe08a3281f0eea48d79df4f2f7d99aa4e9b 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state) /* * Find the orc_entry associated with the text address. * - * Decrement call return addresses by one so they work for sibling - * calls and calls to noreturn functions. + * For a call frame (as opposed to a signal frame), state->ip points to + * the instruction after the call. That instruction's stack layout + * could be different from the call instruction's layout, for example + * if the call was to a noreturn function. So get the ORC data for the + * call instruction itself. */ orc = orc_find(state->signal ? state->ip : state->ip - 1); if (!orc) { @@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, state->sp = task->thread.sp; state->bp = READ_ONCE_NOCHECK(frame->bp); state->ip = READ_ONCE_NOCHECK(frame->ret_addr); + state->signal = (void *)state->ip == ret_from_fork; } if (get_stack_info((unsigned long *)state->sp, state->task, diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 3bfc8dd8a43dbfb52563552284e5315859704749..9a03e5b23135af92649938ea8a9dfc1c0443f21b 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -358,6 +358,7 @@ SECTIONS .bss : AT(ADDR(.bss) - LOAD_OFFSET) { __bss_start = .; *(.bss..page_aligned) + . = ALIGN(PAGE_SIZE); *(BSS_MAIN) BSS_DECRYPTED . 
= ALIGN(PAGE_SIZE); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 5bf72fc86a8e9f2a348330d95b61aa044e3a5979..4ce2ddd26c0b735d7100a68059f3680f37303af4 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c0da4dd78ac592569ad44731c43d258eb2494ccb..5bbf76189afa4e554187ec8d8acc5bc5560b3513 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm) svm->nested.vmcb = 0; svm->vcpu.arch.hflags = 0; - if (pause_filter_count) { + if (!kvm_pause_in_guest(svm->vcpu.kvm)) { control->pause_filter_count = pause_filter_count; if (pause_filter_thresh) control->pause_filter_thresh = pause_filter_thresh; @@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm) struct kvm_vcpu *vcpu = &svm->vcpu; bool in_kernel = (svm_get_cpl(vcpu) == 0); - if (pause_filter_thresh) + if (!kvm_pause_in_guest(vcpu->kvm)) grow_ple_window(vcpu); kvm_vcpu_on_spin(vcpu, in_kernel); @@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) { - if (pause_filter_thresh) + if (!kvm_pause_in_guest(vcpu->kvm)) shrink_ple_window(vcpu); } @@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm) static int svm_vm_init(struct kvm *kvm) { + if (!pause_filter_count || !pause_filter_thresh) + kvm->arch.pause_in_guest = true; + if (avic) { int ret = avic_vm_init(kvm); if (ret) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b26655104d4a9ccf8ceae2f5185df376b8fa38a3..11e4df5600183af206a0f5fab4adb93108bef6e4 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) return -EINVAL; + if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) + return -EINVAL; + /* * SMM temporarily disables VMX, so we cannot be in guest mode, * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags @@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, if (ret) return ret; - /* Empty 'VMXON' state is permitted */ - if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) - return 0; + /* Empty 'VMXON' state is permitted if no VMCS loaded */ + if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) { + /* See vmx_has_valid_vmcs12. 
*/ + if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) || + (kvm_state->flags & KVM_STATE_NESTED_EVMCS) || + (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) + return -EINVAL; + else + return 0; + } if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || @@ -6176,6 +6186,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu, goto error_guest_mode; } + vmx->nested.has_preemption_timer_deadline = false; if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { vmx->nested.has_preemption_timer_deadline = true; vmx->nested.preemption_timer_deadline = diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 758bccc26cf9899c8e224b9ed039e71f9b2a4c4b..197148d76b8fdb8ab8d0dc370e56525f28426608 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu) return to_vmx(vcpu)->nested.cached_shadow_vmcs12; } +/* + * Note: the same condition is checked against the state provided by userspace + * in vmx_set_nested_state; if it is satisfied, the nested state must include + * the VMCS12. + */ static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S index 3b2b58164ec18928329757c5364b7b6ee274d31c..40526dd85137b0377b910eb9ce2156fece74dfad 100644 --- a/arch/x86/math-emu/wm_sqrt.S +++ b/arch/x86/math-emu/wm_sqrt.S @@ -209,7 +209,7 @@ sqrt_stage_2_finish: #ifdef PARANOID /* It should be possible to get here only if the arg is ffff....ffff */ - cmp $0xffffffff,FPU_fsqrt_arg_1 + cmpl $0xffffffff,FPU_fsqrt_arg_1 jnz sqrt_stage_2_error #endif /* PARANOID */ diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index fc13cbbb2dce21f625212a1d70240f84cb0ec0dc..abb6075397f0510b5b6e5b70f322086c8873cba7 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c @@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void) goto out; uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL); - irq_domain_free_fwnode(fn); if (uv_domain) uv_domain->parent = x86_vector_domain; + else + irq_domain_free_fwnode(fn); out: mutex_unlock(&uv_lock); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 0d68948c82ad6f13a637d2e88ad6f86cfa31d91f..c46b9f2e732ff879ec5809d04c8edf03a83da105 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -870,6 +870,17 @@ static void xen_load_sp0(unsigned long sp0) } #ifdef CONFIG_X86_IOPL_IOPERM +static void xen_invalidate_io_bitmap(void) +{ + struct physdev_set_iobitmap iobitmap = { + .bitmap = 0, + .nr_ports = 0, + }; + + native_tss_invalidate_io_bitmap(); + HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap); +} + static void xen_update_io_bitmap(void) { struct physdev_set_iobitmap iobitmap; @@ -1099,6 +1110,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .load_sp0 = xen_load_sp0, #ifdef CONFIG_X86_IOPL_IOPERM + .invalidate_io_bitmap = xen_invalidate_io_bitmap, .update_io_bitmap = xen_update_io_bitmap, #endif .io_delay = xen_io_delay, diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index d8292cc9ebdf88c3389d0ed9a50a2eea1be25790..243a5fe79d3cdfccc0d535e92d54a40ffaf217c1 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -57,7 +57,7 @@ static inline __wsum csum_and_copy_from_user(const void __user *src, void 
*dst, int len, __wsum sum, int *err_ptr) { - if (access_ok(dst, len)) + if (access_ok(src, len)) return csum_partial_copy_generic((__force const void *)src, dst, len, sum, err_ptr, NULL); if (len) diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index 9bae79f70301339a20f08b5921d65d0ff6f76605..99fcd63ce597f6e781ec995afcacfef952463b2a 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c @@ -362,9 +362,7 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id) struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); unsigned i; - for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS); - i < XCHAL_NUM_PERF_COUNTERS; - i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) { + for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) { uint32_t v = get_er(XTENSA_PMU_PMSTAT(i)); struct perf_event *event = ev->event[i]; struct hw_perf_event *hwc = &event->hw; diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index d9204dc2656e718a3826e703819418d409036e25..be2c78f7169500e73fb6aded1803b6cd69ed9e71 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -724,7 +724,8 @@ c_start(struct seq_file *f, loff_t *pos) static void * c_next(struct seq_file *f, void *v, loff_t *pos) { - return NULL; + ++*pos; + return c_start(f, pos); } static void diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 4092555828b13a25305f250863ad2e4d42016727..24cf6972eacea605f34c2386b308680bb724be35 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void) } EXPORT_SYMBOL(__xtensa_libgcc_window_spill); -unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_and_4); -unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) +unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v) { BUG(); } diff --git a/block/blk-mq.c b/block/blk-mq.c index a9aa6d1e44cf32bf61e7e7fa068662776f762c09..4e0d173beaa3526bc3f07a2bc13870522d0b359c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -828,10 +828,10 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { /* - * If we find a request that is inflight and the queue matches, + * If we find a request that isn't idle and the queue matches, * we know the queue is busy. Return false to stop the iteration. 
*/ - if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { + if (blk_mq_request_started(rq) && rq->q == hctx->queue) { bool *busy = priv; *busy = true; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d7f43d4ea925a0dcaa1fbce77dbef3b81fc4bc62..e5fae4e838c067d2fb27996ee5446e6879638bcb 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -119,6 +119,7 @@ static int software_key_query(const struct kernel_pkey_params *params, if (IS_ERR(tfm)) return PTR_ERR(tfm); + ret = -ENOMEM; key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, GFP_KERNEL); if (!key) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 42c672f1584e909544cc496a46994784ea172d5f..cbe6aa77d50d1cff847d18aa01204e9cdb0b82b8 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } mmap_read_unlock(mm); - mmput(mm); + mmput_async(mm); trace_binder_unmap_kernel_start(alloc, index); diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index d9fd70280482c6e02cb298add8140776909e0fb0..7f814da3c2d065a98482b723785edccc380e5bfb 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf) return -EMEDIUMTYPE; } dev_data = PRIV(dev); - if (!dev_data->persist) return 0; + if (!dev_data->persist) { + atm_dev_put(dev); + return 0; + } dev_data->persist = 0; - if (PRIV(dev)->vcc) return 0; + if (PRIV(dev)->vcc) { + atm_dev_put(dev); + return 0; + } kfree(dev_data); atm_dev_put(dev); atm_dev_deregister(dev); diff --git a/drivers/base/base.h b/drivers/base/base.h index 95c22c0f903609d480b09d0da60b6812f1da5bd7..40fb069a8a7e4fd9b7a358bad0f15f743071933e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -153,7 +153,6 @@ extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); extern void device_block_probing(void); extern void device_unblock_probing(void); -extern void driver_deferred_probe_force_trigger(void); /* /sys/devices directory */ extern struct kset *devices_kset; diff --git a/drivers/base/core.c b/drivers/base/core.c index 67d39a90b45c7d904bc19b0f6bb980217bcffad8..05d414e9e8a4085a2ff4099185ec02c565e4d30b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock); static LIST_HEAD(deferred_sync); static unsigned int defer_sync_state_count = 1; static unsigned int defer_fw_devlink_count; +static LIST_HEAD(deferred_fw_devlink); static DEFINE_MUTEX(defer_fw_devlink_lock); static bool fw_devlink_is_permissive(void); @@ -754,11 +755,11 @@ static void __device_links_queue_sync_state(struct device *dev, */ dev->state_synced = true; - if (WARN_ON(!list_empty(&dev->links.defer_sync))) + if (WARN_ON(!list_empty(&dev->links.defer_hook))) return; get_device(dev); - list_add_tail(&dev->links.defer_sync, list); + list_add_tail(&dev->links.defer_hook, list); } /** @@ -776,8 +777,8 @@ static void device_links_flush_sync_list(struct list_head *list, { struct device *dev, *tmp; - list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { - list_del_init(&dev->links.defer_sync); + list_for_each_entry_safe(dev, tmp, list, links.defer_hook) { + list_del_init(&dev->links.defer_hook); if (dev != dont_lock_dev) device_lock(dev); @@ -815,12 +816,12 @@ void 
device_links_supplier_sync_state_resume(void) if (defer_sync_state_count) goto out; - list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { + list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) { /* * Delete from deferred_sync list before queuing it to - * sync_list because defer_sync is used for both lists. + * sync_list because defer_hook is used for both lists. */ - list_del_init(&dev->links.defer_sync); + list_del_init(&dev->links.defer_hook); __device_links_queue_sync_state(dev, &sync_list); } out: @@ -838,8 +839,8 @@ late_initcall(sync_state_resume_initcall); static void __device_links_supplier_defer_sync(struct device *sup) { - if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) - list_add_tail(&sup->links.defer_sync, &deferred_sync); + if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup)) + list_add_tail(&sup->links.defer_hook, &deferred_sync); } static void device_link_drop_managed(struct device_link *link) @@ -1052,7 +1053,7 @@ void device_links_driver_cleanup(struct device *dev) WRITE_ONCE(link->status, DL_STATE_DORMANT); } - list_del_init(&dev->links.defer_sync); + list_del_init(&dev->links.defer_hook); __device_links_no_driver(dev); device_links_write_unlock(); @@ -1244,6 +1245,12 @@ static void fw_devlink_link_device(struct device *dev) fw_ret = -EAGAIN; } else { fw_ret = -ENODEV; + /* + * defer_hook is not used to add device to deferred_sync list + * until device is bound. Since deferred fw devlink also blocks + * probing, same list hook can be used for deferred_fw_devlink. + */ + list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink); } if (fw_ret == -ENODEV) @@ -1312,6 +1319,9 @@ void fw_devlink_pause(void) */ void fw_devlink_resume(void) { + struct device *dev, *tmp; + LIST_HEAD(probe_list); + mutex_lock(&defer_fw_devlink_lock); if (!defer_fw_devlink_count) { WARN(true, "Unmatched fw_devlink pause/resume!"); @@ -1323,9 +1333,19 @@ void fw_devlink_resume(void) goto out; device_link_add_missing_supplier_links(); - driver_deferred_probe_force_trigger(); + list_splice_tail_init(&deferred_fw_devlink, &probe_list); out: mutex_unlock(&defer_fw_devlink_lock); + + /* + * bus_probe_device() can cause new devices to get added and they'll + * try to grab defer_fw_devlink_lock. So, this needs to be done outside + * the defer_fw_devlink_lock. + */ + list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) { + list_del_init(&dev->links.defer_hook); + bus_probe_device(dev); + } } /* Device links support end. */ @@ -2172,7 +2192,7 @@ void device_initialize(struct device *dev) INIT_LIST_HEAD(&dev->links.consumers); INIT_LIST_HEAD(&dev->links.suppliers); INIT_LIST_HEAD(&dev->links.needs_suppliers); - INIT_LIST_HEAD(&dev->links.defer_sync); + INIT_LIST_HEAD(&dev->links.defer_hook); dev->links.status = DL_DEV_NO_DRIVER; } EXPORT_SYMBOL_GPL(device_initialize); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9a1d940342ac4ae0ec30aeaeb6ed67bc70ceefa2..48ca81cb8ebc4b74c92dbddc12fb6464031f9f8e 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -164,11 +164,6 @@ static void driver_deferred_probe_trigger(void) if (!driver_deferred_probe_enable) return; - driver_deferred_probe_force_trigger(); -} - -void driver_deferred_probe_force_trigger(void) -{ /* * A successful probe means that all the devices in the pending list * should be triggered to be reprobed. 
Move all the deferred devices diff --git a/drivers/base/property.c b/drivers/base/property.c index 1e6d75e65938fb8247c66adb3baa6181f8058920..d58aa98fe9645703b5be22c16d89241f663600a5 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, return next; /* When no more children in primary, continue with secondary */ - if (!IS_ERR_OR_NULL(fwnode->secondary)) + if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) next = fwnode_get_next_child_node(fwnode->secondary, child); return next; diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 0fd6f97ee523fdec13623b859d76dc1dd3d0e355..1d1d26b0d27969cc556571986a5b7c0e742bac03 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -4,7 +4,7 @@ # subsystems should select the appropriate symbols. config REGMAP - default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C) + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C) select IRQ_DOMAIN if REGMAP_IRQ bool diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 089e5dc7144a6a1a090cbe61dd04fcc0b9fd6c38..f58baff2be0af6d0eb5e28091cce3a2c3a76a039 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_only); - ssize_t result; - bool was_enabled, require_sync = false; + bool new_val, require_sync = false; int err; - map->lock(map->lock_arg); + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; - was_enabled = map->cache_only; + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if (result < 0) { - map->unlock(map->lock_arg); - return result; - } + map->lock(map->lock_arg); - if (map->cache_only && !was_enabled) { + if (new_val && !map->cache_only) { dev_warn(map->dev, "debugfs cache_only=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_only && was_enabled) { + } else if (!new_val && map->cache_only) { dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); require_sync = true; } + map->cache_only = new_val; map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); if (require_sync) { err = regcache_sync(map); @@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file, dev_err(map->dev, "Failed to sync cache %d\n", err); } - return result; + return count; } static const struct file_operations regmap_cache_only_fops = { @@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file, { struct regmap *map = container_of(file->private_data, struct regmap, cache_bypass); - ssize_t result; - bool was_enabled; + bool new_val; + int err; - map->lock(map->lock_arg); + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; - was_enabled = map->cache_bypass; + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; - result = debugfs_write_file_bool(file, user_buf, count, ppos); - if 
(result < 0) - goto out; + map->lock(map->lock_arg); - if (map->cache_bypass && !was_enabled) { + if (new_val && !map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); add_taint(TAINT_USER, LOCKDEP_STILL_OK); - } else if (!map->cache_bypass && was_enabled) { + } else if (!new_val && map->cache_bypass) { dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); } + map->cache_bypass = new_val; -out: map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); - return result; + return count; } static const struct file_operations regmap_cache_bypass_fops = { diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 06a796821e8b0f8f7ebc82211e143a22fb51e3f8..795a62a040220f5ae130e14b056007491ec4d333 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1364,7 +1364,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data) /* If the user didn't specify a name match any */ if (data) - return (*r)->name == data; + return !strcmp((*r)->name, data); else return 1; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 43cff01a5a675d47cd9bcb60ffcf1440ac8fc871..ce7e9f223b20b821e2760f955f6df5bfec8f853e 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1033,25 +1033,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, test_bit(NBD_RT_BOUND, &config->runtime_flags))) { dev_err(disk_to_dev(nbd->disk), "Device being setup by another task"); - sockfd_put(sock); - return -EBUSY; + err = -EBUSY; + goto put_socket; + } + + nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); + if (!nsock) { + err = -ENOMEM; + goto put_socket; } socks = krealloc(config->socks, (config->num_connections + 1) * sizeof(struct nbd_sock *), GFP_KERNEL); if (!socks) { - sockfd_put(sock); - return -ENOMEM; + kfree(nsock); + err = -ENOMEM; + goto put_socket; } config->socks = socks; - nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); - if (!nsock) { - sockfd_put(sock); - return -ENOMEM; - } - nsock->fallback_index = -1; nsock->dead = false; mutex_init(&nsock->tx_lock); @@ -1063,6 +1064,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, atomic_inc(&config->live_connections); return 0; + +put_socket: + sockfd_put(sock); + return err; } static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 6e2ad90b17a37645c216af0f8f8fda95f9b1ef65..270dd810be54c3350e0c1c169a1cd38e0c15436e 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -2021,7 +2021,8 @@ static ssize_t hot_add_show(struct class *class, return ret; return scnprintf(buf, PAGE_SIZE, "%d\n", ret); } -static CLASS_ATTR_RO(hot_add); +static struct class_attribute class_attr_hot_add = + __ATTR(hot_add, 0400, hot_add_show, NULL); static ssize_t hot_remove_store(struct class *class, struct class_attribute *attr, diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index bb54fb514e40fc3b072c7b2af25efd560a2cac29..191c97b84715f4e06ba0597f4efffb2ef4cd9cd5 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -236,15 +236,14 @@ static int sysc_wait_softreset(struct sysc *ddata) syss_done = ddata->cfg.syss_mask; if (syss_offset >= 0) { - error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval, - (rstval & ddata->cfg.syss_mask) == - syss_done, - 100, MAX_MODULE_SOFTRESET_WAIT); + error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata, + rstval, (rstval & ddata->cfg.syss_mask) == + 
syss_done, 100, MAX_MODULE_SOFTRESET_WAIT); } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) { - error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval, - !(rstval & sysc_mask), - 100, MAX_MODULE_SOFTRESET_WAIT); + error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata, + rstval, !(rstval & sysc_mask), + 100, MAX_MODULE_SOFTRESET_WAIT); } return error; @@ -1279,7 +1278,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_suspend(dev); @@ -1291,7 +1291,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) + if (ddata->cfg.quirks & + (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) return 0; return pm_runtime_force_resume(dev); @@ -1728,8 +1729,8 @@ static void sysc_quirk_rtc(struct sysc *ddata, bool lock) local_irq_save(flags); /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */ - error = readl_poll_timeout(ddata->module_va + 0x44, val, - !(val & BIT(0)), 100, 50); + error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val, + !(val & BIT(0)), 100, 50); if (error) dev_warn(ddata->dev, "rtc busy timeout\n"); /* Now we have ~15 microseconds to read/write various registers */ @@ -2864,6 +2865,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata) return error; } +/* + * Ignore timers tagged with no-reset and no-idle. These are likely in use, + * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks + * are needed, we could also look at the timer register configuration. + */ +static int sysc_check_active_timer(struct sysc *ddata) +{ + if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && + ddata->cap->type != TI_SYSC_OMAP4_TIMER) + return 0; + + if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && + (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) + return -EBUSY; + + return 0; +} + static const struct of_device_id sysc_match_table[] = { { .compatible = "simple-bus", }, { /* sentinel */ }, @@ -2920,6 +2939,10 @@ static int sysc_probe(struct platform_device *pdev) if (error) return error; + error = sysc_check_active_timer(ddata); + if (error) + return error; + error = sysc_get_clocks(ddata); if (error) return error; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 934c92dcb9ab784c9821d26ef916686a4252ccd4..687d4af6945d365baebefee5b005eebfb17ad2d9 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -814,7 +814,8 @@ static struct inode *devmem_inode; #ifdef CONFIG_IO_STRICT_DEVMEM void revoke_devmem(struct resource *res) { - struct inode *inode = READ_ONCE(devmem_inode); + /* pairs with smp_store_release() in devmem_init_inode() */ + struct inode *inode = smp_load_acquire(&devmem_inode); /* * Check that the initialization has completed. Losing the race @@ -1028,8 +1029,11 @@ static int devmem_init_inode(void) return rc; } - /* publish /dev/mem initialized */ - WRITE_ONCE(devmem_inode, inode); + /* + * Publish /dev/mem initialized. + * Pairs with smp_load_acquire() in revoke_devmem(). 
+ */ + smp_store_release(&devmem_inode, inode); return 0; } diff --git a/drivers/char/random.c b/drivers/char/random.c index 2a41b21623ae42b6efdf530287c5f7e20660e3a1..d20ba1b104ca3aceadd19fb6fbcb9ba035799a62 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); + this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]); if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 00c5e3acee460421e84bdb91968bfc0aebff4e5a..ca691bce97919f60332a469c968c38ce08afef1f 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2116,6 +2116,7 @@ static struct virtio_device_id id_table[] = { { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, { 0 }, }; +MODULE_DEVICE_TABLE(virtio, id_table); static unsigned int features[] = { VIRTIO_CONSOLE_F_SIZE, @@ -2128,6 +2129,7 @@ static struct virtio_device_id rproc_serial_id_table[] = { #endif { 0 }, }; +MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table); static unsigned int rproc_serial_features[] = { }; @@ -2280,6 +2282,5 @@ static void __exit fini(void) module_init(init); module_exit(fini); -MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 69934c0c3dd85f49d69a4f79b5d03eb31a640b27..326f91b2dda9fdc4e474d6156c5d7d538f8fa224 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -50,6 +50,7 @@ source "drivers/clk/versatile/Kconfig" config CLK_HSDK bool "PLL Driver for HSDK platform" depends on OF || COMPILE_TEST + depends on IOMEM help This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs control. 
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c index 99afc949925f0fe53efc80187b4501d1005b2ea7..177368cac6dd6afa25993bc206dd42ac65fbcae0 100644 --- a/drivers/clk/clk-ast2600.c +++ b/drivers/clk/clk-ast2600.c @@ -131,6 +131,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = { { 0 } }; +static const struct clk_div_table ast2600_emmc_extclk_div_table[] = { + { 0x0, 2 }, + { 0x1, 4 }, + { 0x2, 6 }, + { 0x3, 8 }, + { 0x4, 10 }, + { 0x5, 12 }, + { 0x6, 14 }, + { 0x7, 16 }, + { 0 } +}; + static const struct clk_div_table ast2600_mac_div_table[] = { { 0x0, 4 }, { 0x1, 4 }, @@ -390,6 +402,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev, return hw; } +static const char *const emmc_extclk_parent_names[] = { + "emmc_extclk_hpll_in", + "mpll", +}; + static const char * const vclk_parent_names[] = { "dpll", "d1pll", @@ -459,16 +476,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw; - /* EMMC ext clock divider */ - hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0, - &aspeed_g6_clk_lock); + /* EMMC ext clock */ + hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll", + 0, 1, 2); if (IS_ERR(hw)) return PTR_ERR(hw); - hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0, - scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0, - ast2600_div_table, - &aspeed_g6_clk_lock); + + hw = clk_hw_register_mux(dev, "emmc_extclk_mux", + emmc_extclk_parent_names, + ARRAY_SIZE(emmc_extclk_parent_names), 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1, + 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux", + 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, + 15, 0, &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hw = clk_hw_register_divider_table(dev, "emmc_extclk", + "emmc_extclk_gate", 0, + scu_g6_base + + ASPEED_G6_CLK_SELECTION1, 12, + 3, 0, ast2600_emmc_extclk_div_table, + &aspeed_g6_clk_lock); if (IS_ERR(hw)) return PTR_ERR(hw); aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw; diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig index ded07b0bd0d5e6ea2615d8728687295577e44faa..557d6213783c74c925d3739455b73d06a2015041 100644 --- a/drivers/clk/mvebu/Kconfig +++ b/drivers/clk/mvebu/Kconfig @@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON config ARMADA_AP_CPU_CLK bool + select ARMADA_AP_CP_HELPER config ARMADA_CP110_SYSCON bool diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index ecf7b7db2d050ee4fd6be01d7d41a0eebfcbe299..6c3e84180146137665e4be0b7c032acbd8a9b8ce 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .set_next_event_virt = erratum_set_next_event_tval_virt, }, #endif +#ifdef CONFIG_ARM64_ERRATUM_1418040 + { + .match_type = ate_match_local_cap_id, + .id = (void *)ARM64_WORKAROUND_1418040, + .desc = "ARM erratum 1418040", + .disable_compat_vdso = true, + }, +#endif }; typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, @@ -566,6 +574,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa if (wa->read_cntvct_el0) { clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE; vdso_default = VDSO_CLOCKMODE_NONE; + } else if 
(wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) { + vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT; + clocksource_counter.vdso_clock_mode = vdso_default; } } diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 6fd1f219a512ef9b1a2d145a4168190dec12d2a4..f6fd1c1cc527f9d45232501b1fd77c2649165f85 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -19,7 +19,7 @@ /* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */ #define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \ SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE) - +#define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE) #define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2) #define DMTIMER_RESET_WAIT 100000 @@ -44,6 +44,8 @@ struct dmtimer_systimer { u8 ctrl; u8 wakeup; u8 ifctrl; + struct clk *fck; + struct clk *ick; unsigned long rate; }; @@ -298,16 +300,20 @@ static void __init dmtimer_systimer_select_best(void) } /* Interface clocks are only available on some SoCs variants */ -static int __init dmtimer_systimer_init_clock(struct device_node *np, +static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, + struct device_node *np, const char *name, unsigned long *rate) { struct clk *clock; unsigned long r; + bool is_ick = false; int error; + is_ick = !strncmp(name, "ick", 3); + clock = of_clk_get_by_name(np, name); - if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3)) + if ((PTR_ERR(clock) == -EINVAL) && is_ick) return 0; else if (IS_ERR(clock)) return PTR_ERR(clock); @@ -320,6 +326,11 @@ static int __init dmtimer_systimer_init_clock(struct device_node *np, if (!r) return -ENODEV; + if (is_ick) + t->ick = clock; + else + t->fck = clock; + *rate = r; return 0; @@ -339,7 +350,10 @@ static void dmtimer_systimer_enable(struct dmtimer_systimer *t) static void dmtimer_systimer_disable(struct dmtimer_systimer *t) { - writel_relaxed(0, t->base + t->sysc); + if (!dmtimer_systimer_revision1(t)) + return; + + writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc); } static int __init dmtimer_systimer_setup(struct device_node *np, @@ -366,13 +380,13 @@ static int __init dmtimer_systimer_setup(struct device_node *np, pr_err("%s: clock source init failed: %i\n", __func__, error); /* For ti-sysc, we have timer clocks at the parent module level */ - error = dmtimer_systimer_init_clock(np->parent, "fck", &rate); + error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate); if (error) goto err_unmap; t->rate = rate; - error = dmtimer_systimer_init_clock(np->parent, "ick", &rate); + error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate); if (error) goto err_unmap; @@ -496,12 +510,18 @@ static void omap_clockevent_idle(struct clock_event_device *evt) struct dmtimer_systimer *t = &clkevt->t; dmtimer_systimer_disable(t); + clk_disable(t->fck); } static void omap_clockevent_unidle(struct clock_event_device *evt) { struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt); struct dmtimer_systimer *t = &clkevt->t; + int error; + + error = clk_enable(t->fck); + if (error) + pr_err("could not enable timer fck on resume: %i\n", error); dmtimer_systimer_enable(t); writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena); @@ -570,8 +590,8 @@ static int __init dmtimer_clockevent_init(struct device_node *np) 3, /* Timer internal resynch latency */ 0xffffffff); - if (of_device_is_compatible(np, "ti,am33xx") || - 
of_device_is_compatible(np, "ti,am43")) { + if (of_machine_is_compatible("ti,am33xx") || + of_machine_is_compatible("ti,am43")) { dev->suspend = omap_clockevent_idle; dev->resume = omap_clockevent_unidle; } @@ -616,12 +636,18 @@ static void dmtimer_clocksource_suspend(struct clocksource *cs) clksrc->loadval = readl_relaxed(t->base + t->counter); dmtimer_systimer_disable(t); + clk_disable(t->fck); } static void dmtimer_clocksource_resume(struct clocksource *cs) { struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs); struct dmtimer_systimer *t = &clksrc->t; + int error; + + error = clk_enable(t->fck); + if (error) + pr_err("could not enable timer fck on resume: %i\n", error); dmtimer_systimer_enable(t); writel_relaxed(clksrc->loadval, t->base + t->counter); @@ -653,8 +679,8 @@ static int __init dmtimer_clocksource_init(struct device_node *np) dev->mask = CLOCKSOURCE_MASK(32); dev->flags = CLOCK_SOURCE_IS_CONTINUOUS; - if (of_device_is_compatible(np, "ti,am33xx") || - of_device_is_compatible(np, "ti,am43")) { + /* Unlike for clockevent, legacy code sets suspend only for am4 */ + if (of_machine_is_compatible("ti,am43")) { dev->suspend = dmtimer_clocksource_suspend; dev->resume = dmtimer_clocksource_resume; } diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index aa13708c2bc30653ba8426b99f2b4ddc82cf0c5e..d22cfae1b0198922c212ee08d491393f904bb3de 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -1274,18 +1274,26 @@ static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter, struct counter_signal *signal, void *private, char *buf) { - const struct quad8_iio *const priv = counter->priv; + struct quad8_iio *const priv = counter->priv; const size_t channel_id = signal->id / 2; - const bool disabled = !(priv->cable_fault_enable & BIT(channel_id)); + bool disabled; unsigned int status; unsigned int fault; - if (disabled) + mutex_lock(&priv->lock); + + disabled = !(priv->cable_fault_enable & BIT(channel_id)); + + if (disabled) { + mutex_unlock(&priv->lock); return -EINVAL; + } /* Logic 0 = cable fault */ status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); + mutex_unlock(&priv->lock); + /* Mask respective channel and invert logic */ fault = !(status & BIT(channel_id)); @@ -1317,6 +1325,8 @@ static ssize_t quad8_signal_cable_fault_enable_write( if (ret) return ret; + mutex_lock(&priv->lock); + if (enable) priv->cable_fault_enable |= BIT(channel_id); else @@ -1327,6 +1337,8 @@ static ssize_t quad8_signal_cable_fault_enable_write( outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS); + mutex_unlock(&priv->lock); + return len; } @@ -1353,6 +1365,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter, if (ret) return ret; + mutex_lock(&priv->lock); + priv->fck_prescaler[channel_id] = prescaler; /* Reset Byte Pointer */ @@ -1363,6 +1377,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter, outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC, base_offset + 1); + mutex_unlock(&priv->lock); + return len; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e771e8b4f99f09cd93310eb4f32c933f2ab70e28..7e0f7880b21a66f5424d0e2e91ac6c8d54088320 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2464,7 +2464,7 @@ static struct cpufreq_driver intel_cpufreq = { .name = "intel_cpufreq", }; -static struct cpufreq_driver *default_driver = &intel_pstate; +static struct cpufreq_driver 
*default_driver; static void intel_pstate_driver_cleanup(void) { @@ -2758,6 +2758,7 @@ static int __init intel_pstate_init(void) hwp_active++; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; + default_driver = &intel_pstate; goto hwp_cpu_matched; } } else { @@ -2775,7 +2776,8 @@ static int __init intel_pstate_init(void) return -ENODEV; } /* Without HWP start in the passive mode. */ - default_driver = &intel_cpufreq; + if (!default_driver) + default_driver = &intel_cpufreq; hwp_cpu_matched: /* @@ -2820,6 +2822,8 @@ static int __init intel_pstate_setup(char *str) if (!strcmp(str, "disable")) { no_load = 1; + } else if (!strcmp(str, "active")) { + default_driver = &intel_pstate; } else if (!strcmp(str, "passive")) { default_driver = &intel_cpufreq; no_hwp = 1; diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index f200fae6f7cb55be0abcb0ae594a527026422e00..54093115eb95d9ffaaa5bd2adcfb9f5fe3f8d49d 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev, case PF_INET: if (likely(!inet_sk(sk)->inet_rcv_saddr)) return ndev; - ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr); + ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index e1401d9cc756cea07f7ad310c17fed29e0f3e9db..2e9acae1cba3b74fc8cf2b1e7b0b4894ee7bf7f1 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) &record_type); if (err) goto out_err; + + /* Avoid appending tls handshake, alert to tls data */ + if (skb) + tx_skb_finalize(skb); } recordsz = size; csk->tlshws.txleft = recordsz; csk->tlshws.type = record_type; - - if (skb) - ULP_SKB_CB(skb)->ulp.tls.type = record_type; } if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) || diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 412629601ad3b42e130bb042329ec4e8e925a566..1ca609f66fdf889027a681c488ec0e425c3d6cdb 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) size_t ret = 0; dmabuf = dentry->d_fsdata; - dma_resv_lock(dmabuf->resv, NULL); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN); - dma_resv_unlock(dmabuf->resv); + spin_unlock(&dmabuf->name_lock); return dynamic_dname(dentry, buffer, buflen, "/%s:%s", dentry->d_name.name, ret > 0 ? 
name : ""); @@ -338,8 +338,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf) kfree(name); goto out_unlock; } + spin_lock(&dmabuf->name_lock); kfree(dmabuf->name); dmabuf->name = name; + spin_unlock(&dmabuf->name_lock); out_unlock: dma_resv_unlock(dmabuf->resv); @@ -402,10 +404,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file) /* Don't count the temporary reference taken inside procfs seq_show */ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1); seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name); - dma_resv_lock(dmabuf->resv, NULL); + spin_lock(&dmabuf->name_lock); if (dmabuf->name) seq_printf(m, "name:\t%s\n", dmabuf->name); - dma_resv_unlock(dmabuf->resv); + spin_unlock(&dmabuf->name_lock); } static const struct file_operations dma_buf_fops = { @@ -542,6 +544,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) dmabuf->size = exp_info->size; dmabuf->exp_name = exp_info->exp_name; dmabuf->owner = exp_info->owner; + spin_lock_init(&dmabuf->name_lock); init_waitqueue_head(&dmabuf->poll); dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index b175229a4b01d7992c2356d5134a754dd6bf98fe..604f803579312b0059d064ddf0164725549105b5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) } else if (dmatest_run) { if (!is_threaded_test_pending(info)) { pr_info("No channels configured, continue with any\n"); + if (!is_threaded_test_run(info)) + stop_threaded_test(info); add_threaded_test(info); } start_threaded_tests(info); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 21cb2a58dbd29a0c6486b683565933071a02eacf..a1b56f52db2f2437faa615c661f87d2673da583c 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) { struct dw_dma *dw = to_dw_dma(dwc->chan.device); - if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) - return; - dw->initialize_chan(dwc); /* Enable interrupts */ channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask); - - set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); } /*----------------------------------------------------------------------*/ @@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan) void do_dw_dma_off(struct dw_dma *dw) { - unsigned int i; - dma_writel(dw, CFG, 0); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); @@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw) while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) cpu_relax(); - - for (i = 0; i < dw->dma.chancnt; i++) - clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); } void do_dw_dma_on(struct dw_dma *dw) @@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) /* Clear custom channel configuration */ memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); - clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags); - /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.BLOCK, dwc->mask); diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 5697c3622699bd64093541a971193bed495cc932..930ae268c497c845f1f89b5e8f77f0e542088cc7 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan 
*fsl_chan, /* * TCD parameters are stored in struct fsl_edma_hw_tcd in little * endian format. However, we need to load the TCD registers in - * big- or little-endian obeying the eDMA engine model endian. + * big- or little-endian obeying the eDMA engine model endian, + * and this is performed from specific edma_write functions */ edma_writew(edma, 0, ®s->tcd[ch].csr); - edma_writel(edma, le32_to_cpu(tcd->saddr), ®s->tcd[ch].saddr); - edma_writel(edma, le32_to_cpu(tcd->daddr), ®s->tcd[ch].daddr); - edma_writew(edma, le16_to_cpu(tcd->attr), ®s->tcd[ch].attr); - edma_writew(edma, le16_to_cpu(tcd->soff), ®s->tcd[ch].soff); + edma_writel(edma, (s32)tcd->saddr, ®s->tcd[ch].saddr); + edma_writel(edma, (s32)tcd->daddr, ®s->tcd[ch].daddr); - edma_writel(edma, le32_to_cpu(tcd->nbytes), ®s->tcd[ch].nbytes); - edma_writel(edma, le32_to_cpu(tcd->slast), ®s->tcd[ch].slast); + edma_writew(edma, (s16)tcd->attr, ®s->tcd[ch].attr); + edma_writew(edma, tcd->soff, ®s->tcd[ch].soff); - edma_writew(edma, le16_to_cpu(tcd->citer), ®s->tcd[ch].citer); - edma_writew(edma, le16_to_cpu(tcd->biter), ®s->tcd[ch].biter); - edma_writew(edma, le16_to_cpu(tcd->doff), ®s->tcd[ch].doff); + edma_writel(edma, (s32)tcd->nbytes, ®s->tcd[ch].nbytes); + edma_writel(edma, (s32)tcd->slast, ®s->tcd[ch].slast); - edma_writel(edma, le32_to_cpu(tcd->dlast_sga), + edma_writew(edma, (s16)tcd->citer, ®s->tcd[ch].citer); + edma_writew(edma, (s16)tcd->biter, ®s->tcd[ch].biter); + edma_writew(edma, (s16)tcd->doff, ®s->tcd[ch].doff); + + edma_writel(edma, (s32)tcd->dlast_sga, ®s->tcd[ch].dlast_sga); - edma_writew(edma, le16_to_cpu(tcd->csr), ®s->tcd[ch].csr); + edma_writew(edma, (s16)tcd->csr, ®s->tcd[ch].csr); } static inline @@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) { struct virt_dma_desc *vdesc; + lockdep_assert_held(&fsl_chan->vchan.lock); + vdesc = vchan_next_desc(&fsl_chan->vchan); if (!vdesc) return; diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 67e422590c9aedcfbbeedfa4a9ecf8a557a6dbba..ec1169741de130891502b18950b41c48a86c09b7 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -33,7 +33,7 @@ #define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0) #define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1) #define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1)) -#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0)) +#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0)) #define EDMA_TCD_ATTR_SSIZE_8BIT 0 #define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8) #define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8) diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index eff7ebd8cf356172f8c5d4b15215d192b8821dff..90bb72af306cd4d617551430fce7a4f88c8eeb92 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) fsl_chan = &fsl_edma->chans[ch]; spin_lock(&fsl_chan->vchan.lock); + + if (!fsl_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&fsl_chan->vchan.lock); + continue; + } + if (!fsl_chan->edesc->iscyclic) { list_del(&fsl_chan->edesc->vdesc.node); vchan_cookie_complete(&fsl_chan->edesc->vdesc); diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index ff49847e37a86dd4f28dea8ef1c388187aa8be35..cb376cf6a2d2c3316967dc74dc8e09e84ef6fb5e 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) struct idxd_device *idxd; struct idxd_wq *wq; struct 
device *dev; + int rc = 0; wq = inode_wq(inode); idxd = wq->idxd; @@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); - if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) - return -EBUSY; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; + mutex_lock(&wq->wq_lock); + + if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { + rc = -EBUSY; + goto failed; + } + ctx->wq = wq; filp->private_data = ctx; idxd_wq_get(wq); + mutex_unlock(&wq->wq_lock); return 0; + + failed: + mutex_unlock(&wq->wq_lock); + kfree(ctx); + return rc; } static int idxd_cdev_release(struct inode *node, struct file *filep) @@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep) filep->private_data = NULL; kfree(ctx); + mutex_lock(&wq->wq_lock); idxd_wq_put(wq); + mutex_unlock(&wq->wq_lock); return 0; } diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 8d79a8787104d48b0463540e4c281be7f99f0cff..8d2718c585dc6511bedb8a25469e5374ae08abb0 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq) devm_iounmap(dev, wq->dportal); } +void idxd_wq_disable_cleanup(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + int i, wq_offset; + + lockdep_assert_held(&idxd->dev_lock); + memset(&wq->wqcfg, 0, sizeof(wq->wqcfg)); + wq->type = IDXD_WQT_NONE; + wq->size = 0; + wq->group = NULL; + wq->threshold = 0; + wq->priority = 0; + clear_bit(WQ_FLAG_DEDICATED, &wq->flags); + memset(wq->name, 0, WQ_NAME_SIZE); + + for (i = 0; i < 8; i++) { + wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32); + iowrite32(0, idxd->reg_base + wq_offset); + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", + wq->id, i, wq_offset, + ioread32(idxd->reg_base + wq_offset)); + } +} + /* Device control bits */ static inline bool idxd_is_enabled(struct idxd_device *idxd) { diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index b8f8a363b4a71c57213be6d5362228cab8d617b3..908c8d0ef3ab6f41c159f0ab22b6fd8f180da467 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq); int idxd_wq_disable(struct idxd_wq *wq); int idxd_wq_map_portal(struct idxd_wq *wq); void idxd_wq_unmap_portal(struct idxd_wq *wq); +void idxd_wq_disable_cleanup(struct idxd_wq *wq); /* submission */ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 6510791b9921b4dabc49a02e8a5423ce8b605a96..8a35f58da689092b8555aac1426b88a0ae02a637 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data) iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); if (!err) - return IRQ_HANDLED; + goto out; gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); if (gensts.state == IDXD_DEVICE_STATE_HALT) { @@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data) spin_unlock_bh(&idxd->dev_lock); } + out: idxd_unmask_msix_vector(idxd, irq_entry->id); return IRQ_HANDLED; } diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 052dae5d6dddbd6ff25d6a6b151b462b23aee91f..2e2c5082f3220cb4616ac6f218deb0e61a4e1080 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev) 
idxd_unregister_dma_device(idxd); spin_lock_irqsave(&idxd->dev_lock, flags); rc = idxd_device_disable(idxd); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + idxd_wq_disable_cleanup(wq); + } spin_unlock_irqrestore(&idxd->dev_lock, flags); module_put(THIS_MODULE); if (rc < 0) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 91774039ae5d6345c14faf7ffb4685f2f8429f33..270992c4fe47506020706e1db2f3649ea7fce183 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) sdma_channel_synchronize(chan); - if (sdmac->event_id0 >= 0) - sdma_event_disable(sdmac, sdmac->event_id0); + sdma_event_disable(sdmac, sdmac->event_id0); if (sdmac->event_id1) sdma_event_disable(sdmac, sdmac->event_id1); @@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan, memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); /* Set ENBLn earlier to make sure dma request triggered after that */ - if (sdmac->event_id0 >= 0) { - if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) - return -EINVAL; - sdma_event_enable(sdmac, sdmac->event_id0); - } + if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) + return -EINVAL; + sdma_event_enable(sdmac, sdmac->event_id0); if (sdmac->event_id1) { if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 8ad0ad861c86119ba09824abe2deeb560c44fa83..fd782aee02d92d27626e6a87494e78bc6fe956a5 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -26,6 +26,18 @@ #include "../dmaengine.h" +int completion_timeout = 200; +module_param(completion_timeout, int, 0644); +MODULE_PARM_DESC(completion_timeout, + "set ioat completion timeout [msec] (default 200 [msec])"); +int idle_timeout = 2000; +module_param(idle_timeout, int, 0644); +MODULE_PARM_DESC(idle_timeout, + "set ioat idel timeout [msec] (default 2000 [msec])"); + +#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout) +#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout) + static char *chanerr_str[] = { "DMA Transfer Source Address Error", "DMA Transfer Destination Address Error", diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index e6b622e1ba92eee88d3e54bed6af3c8ca8f2ee84..f7f31fdf14cf916578f9c617890506a77f343a73 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -104,8 +104,6 @@ struct ioatdma_chan { #define IOAT_RUN 5 #define IOAT_CHAN_ACTIVE 6 struct timer_list timer; - #define COMPLETION_TIMEOUT msecs_to_jiffies(100) - #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *ioat_dma; dma_addr_t completion_dma; diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index e15bd15a9ef6ac572da0086c33bee79ba6251600..e12b754e6398d8cecf2ebaf6cd1444e737341694 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id) mcf_chan = &mcf_edma->chans[ch]; spin_lock(&mcf_chan->vchan.lock); + + if (!mcf_chan->edesc) { + /* terminate_all called before */ + spin_unlock(&mcf_chan->vchan.lock); + continue; + } + if (!mcf_chan->edesc->iscyclic) { list_del(&mcf_chan->edesc->vdesc.node); vchan_cookie_complete(&mcf_chan->edesc->vdesc); diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index b218a013c2600f449dac819c2ab11ab6ef74ce5e..8f7ceb698226ca7d29a91e39821e57c27c7863f0 100644 --- a/drivers/dma/sh/usb-dmac.c 
+++ b/drivers/dma/sh/usb-dmac.c @@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan) desc->residue = usb_dmac_get_current_residue(chan, desc, desc->sg_index - 1); desc->done_cookie = desc->vd.tx.cookie; + desc->vd.tx_result.result = DMA_TRANS_NOERROR; + desc->vd.tx_result.residue = desc->residue; vchan_cookie_complete(&desc->vd); /* Restart the next transfer if this driver has a next desc */ diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index db58d7e4f9fec992b84a9db283d7894e502a952b..c5fa2ef74abc7f0071c868d1ea776422641e7a58 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) ret = pm_runtime_get_sync(tdc2dev(tdc)); if (ret < 0) { + pm_runtime_put_noidle(tdc2dev(tdc)); free_irq(tdc->irq, tdc); return ret; } @@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto rpm_disable; + } ret = tegra_adma_init(tdma); if (ret) diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c index 0b8f3dd6b146313c42e87c9d434ff04da8c8614b..77e8e67d995b3bd2eb4afe02a74e3e6842fdd4dd 100644 --- a/drivers/dma/ti/k3-udma-private.c +++ b/drivers/dma/ti/k3-udma-private.c @@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) ud = platform_get_drvdata(pdev); if (!ud) { pr_debug("UDMA has not been probed\n"); + put_device(&pdev->dev); return ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index c91e2dc1bb7262eda458467a84ac8cd72fa046e5..6c879a734360489b905bb08b33e7d14a7c63d9db 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) dev_err(ud->ddev.dev, "Descriptor pool allocation failed\n"); uc->use_dma_pool = false; - return -ENOMEM; + ret = -ENOMEM; + goto err_cleanup; } } @@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ret = udma_get_chan_pair(uc); if (ret) - return ret; + goto err_cleanup; ret = udma_alloc_tx_resources(uc); - if (ret) - return ret; + if (ret) { + udma_put_rchan(uc); + goto err_cleanup; + } ret = udma_alloc_rx_resources(uc); if (ret) { udma_free_tx_resources(uc); - return ret; + goto err_cleanup; } uc->config.src_thread = ud->psil_base + uc->tchan->id; @@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) uc->id); ret = udma_alloc_tx_resources(uc); - if (ret) { - uc->config.remote_thread_id = -1; - return ret; - } + if (ret) + goto err_cleanup; uc->config.src_thread = ud->psil_base + uc->tchan->id; uc->config.dst_thread = uc->config.remote_thread_id; @@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) uc->id); ret = udma_alloc_rx_resources(uc); - if (ret) { - uc->config.remote_thread_id = -1; - return ret; - } + if (ret) + goto err_cleanup; uc->config.src_thread = uc->config.remote_thread_id; uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | @@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) /* Can not happen */ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", __func__, uc->id, uc->config.dir); - return -EINVAL; + ret = -EINVAL; + goto err_cleanup; + } /* check if the channel configuration was successful */ @@ -1847,7 +1848,7 @@ 
static int udma_alloc_chan_resources(struct dma_chan *chan) if (udma_is_chan_running(uc)) { dev_warn(ud->dev, "chan%d: is running!\n", uc->id); - udma_stop(uc); + udma_reset_chan(uc, false); if (udma_is_chan_running(uc)) { dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); ret = -EBUSY; @@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) udma_reset_rings(uc); - INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, - udma_check_tx_completion); return 0; err_irq_free: @@ -1919,7 +1918,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) err_res_free: udma_free_tx_resources(uc); udma_free_rx_resources(uc); - +err_cleanup: udma_reset_uchan(uc); if (uc->use_dma_pool) { @@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan) } cancel_delayed_work_sync(&uc->tx_drain.work); - destroy_delayed_work_on_stack(&uc->tx_drain.work); if (uc->irq_num_ring > 0) { free_irq(uc->irq_num_ring, uc); @@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev) return ret; } - ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype); + ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype); if (!ret && ud->atype > 2) { dev_err(dev, "Invalid atype: %u\n", ud->atype); return -EINVAL; @@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev) tasklet_init(&uc->vc.task, udma_vchan_complete, (unsigned long)&uc->vc); init_completion(&uc->teardown_completed); + INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); } ret = dma_async_device_register(&ud->ddev); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index c2f1d4e6630b10351a1cc428671d04ccc46a6b02..feb7fe6f2da76d7244c13d6ea5251f896c91fb8a 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -356,10 +356,7 @@ static struct pstore_info efi_pstore_info = { static __init int efivars_pstore_init(void) { - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) - return 0; - - if (!efivars_kobject()) + if (!efivars_kobject() || !efivar_supports_writes()) return 0; if (efivars_pstore_disable) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 5114cae4ec9735dad8c0d809176957ce2ba676b2..fdd1db025dbfda2c0a4e5f63b9d3e24fa95cfeb9 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -176,11 +176,13 @@ static struct efivar_operations generic_ops; static int generic_ops_register(void) { generic_ops.get_variable = efi.get_variable; - generic_ops.set_variable = efi.set_variable; - generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; generic_ops.get_next_variable = efi.get_next_variable; generic_ops.query_variable_store = efi_query_variable_store; + if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) { + generic_ops.set_variable = efi.set_variable; + generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking; + } return efivars_register(&generic_efivars, &generic_ops, efi_kobj); } @@ -382,7 +384,8 @@ static int __init efisubsys_init(void) return -ENOMEM; } - if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) { + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | + EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) { efivar_ssdt_load(); error = generic_ops_register(); if (error) @@ -416,7 +419,8 @@ static int __init efisubsys_init(void) err_remove_group: sysfs_remove_group(efi_kobj, &efi_subsys_attr_group); err_unregister: - if 
(efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) + if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE | + EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) generic_ops_unregister(); err_put: kobject_put(efi_kobj); diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 26528a46d99e9e9f0db8c3f55dc3e4706bc774f4..dcea137142b3ceb3913024f6aafa363f4747dd3c 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -680,11 +680,8 @@ int efivars_sysfs_init(void) struct kobject *parent_kobj = efivars_kobject(); int error = 0; - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) - return -ENODEV; - /* No efivars has been registered yet */ - if (!parent_kobj) + if (!parent_kobj || !efivar_supports_writes()) return 0; printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION, diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 4cce372edaf4deae500173999b549e7f535e8528..75daaf20374eec6539e960c0ed3367568994ebc1 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -6,8 +6,7 @@ # enabled, even if doing so doesn't break the build. # cflags-$(CONFIG_X86_32) := -march=i386 -cflags-$(CONFIG_X86_64) := -mcmodel=small \ - $(call cc-option,-maccumulate-outgoing-args) +cflags-$(CONFIG_X86_64) := -mcmodel=small cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -fshort-wchar \ diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c index cc89c4d6196f64af0ee185351387e9012cbfa78b..1de9878ddd3a2fbbeea8c00a7fce98f00beb862c 100644 --- a/drivers/firmware/efi/libstub/alignedmem.c +++ b/drivers/firmware/efi/libstub/alignedmem.c @@ -44,7 +44,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr, *addr = ALIGN((unsigned long)alloc_addr, align); if (slack > 0) { - int l = (alloc_addr % align) / EFI_PAGE_SIZE; + int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE; if (l) { efi_bs_call(free_pages, alloc_addr, slack - l + 1); diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 7f6a57dec51325f9d37cea21d8bd1d00fe0be43d..e5bfac79e5ac972a283f3867b0f0325a22e5e846 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -35,13 +35,16 @@ efi_status_t check_platform_features(void) } /* - * Relocatable kernels can fix up the misalignment with respect to - * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN - * (which accounts for the alignment of statically allocated objects such as - * the swapper stack.) + * Although relocatable kernels can fix up the misalignment with respect to + * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of + * sync with those recorded in the vmlinux when kaslr is disabled but the + * image required relocation anyway. Therefore retain 2M alignment unless + * KASLR is in use. */ -static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN - : MIN_KIMG_ALIGN; +static u64 min_kimg_align(void) +{ + return efi_nokaslr ? 
MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; +} efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, @@ -74,21 +77,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, kernel_size = _edata - _text; kernel_memsize = kernel_size + (_end - _edata); - *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align; + *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align(); if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) { /* * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. */ - status = efi_random_alloc(*reserve_size, min_kimg_align, + status = efi_random_alloc(*reserve_size, min_kimg_align(), reserve_addr, phys_seed); } else { status = EFI_OUT_OF_RESOURCES; } if (status != EFI_SUCCESS) { - if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) { + if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align())) { /* * Just execute from wherever we were loaded by the * UEFI PE/COFF loader if the alignment is suitable. @@ -99,7 +102,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, - ULONG_MAX, min_kimg_align); + ULONG_MAX, min_kimg_align()); if (status != EFI_SUCCESS) { efi_err("Failed to relocate kernel\n"); @@ -108,7 +111,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } } - *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align; + *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align(); memcpy((void *)*image_addr, _text, kernel_size); return EFI_SUCCESS; diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index d40fd68c6bb2e97898d06e3800f3346376a6ba9c..6bca70bbb43d0db1689fd57c7660c013d5c5e20e 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -19,7 +19,7 @@ #include "efistub.h" bool efi_nochunk; -bool efi_nokaslr; +bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE); bool efi_noinitrd; int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; bool efi_novamap; diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index 3318ec3f8e5bcbd62a97d256a7baea2954de04ae..a5a405d8ab4447683b1f7e442d84a1f7056ae8f3 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -121,23 +121,6 @@ static unsigned long get_dram_base(void) return membase; } -/* - * This function handles the architcture specific differences between arm and - * arm64 regarding where the kernel image must be loaded and any memory that - * must be reserved. On failure it is required to free all - * all allocations it has made. - */ -efi_status_t handle_kernel_image(unsigned long *image_addr, - unsigned long *image_size, - unsigned long *reserve_addr, - unsigned long *reserve_size, - unsigned long dram_base, - efi_loaded_image_t *image); - -asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, - unsigned long fdt_addr, - unsigned long fdt_size); - /* * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint * that is described in the PE/COFF header. 
Most of the code is the same diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 2c9d42264c29bc09634f7616b3627f33b0ca3219..85050f5a1b2862fc4d69ec70a996ff26bb308fd2 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -776,6 +776,22 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image, unsigned long *load_size, unsigned long soft_limit, unsigned long hard_limit); +/* + * This function handles the architcture specific differences between arm and + * arm64 regarding where the kernel image must be loaded and any memory that + * must be reserved. On failure it is required to free all + * all allocations it has made. + */ +efi_status_t handle_kernel_image(unsigned long *image_addr, + unsigned long *image_size, + unsigned long *reserve_addr, + unsigned long *reserve_size, + unsigned long dram_base, + efi_loaded_image_t *image); + +asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint, + unsigned long fdt_addr, + unsigned long fdt_size); void efi_handle_post_ebs_state(void); diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index 5a48d996ed710471a1c75531472f4806c59b3d9a..3672539cb96eb3043d66adb12122d0502e2ba554 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -361,8 +362,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, int options_size = 0; efi_status_t status; char *cmdline_ptr; - unsigned long ramdisk_addr; - unsigned long ramdisk_size; efi_system_table = sys_table_arg; @@ -390,8 +389,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, hdr = &boot_params->hdr; - /* Copy the second sector to boot_params */ - memcpy(&hdr->jump, image_base + 512, 512); + /* Copy the setup header from the second sector to boot_params */ + memcpy(&hdr->jump, image_base + 512, + sizeof(struct setup_header) - offsetof(struct setup_header, jump)); /* * Fill out some of the header fields ourselves because the diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index 5f2a4d162795c2ddfbcf122b9ad82963f3185db5..973eef234b365e530576fe09cc407cbb6e0d1366 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -1229,3 +1229,9 @@ int efivars_unregister(struct efivars *efivars) return rv; } EXPORT_SYMBOL_GPL(efivars_unregister); + +int efivar_supports_writes(void) +{ + return __efivars && __efivars->ops->set_variable; +} +EXPORT_SYMBOL_GPL(efivar_supports_writes); diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index 873841af8d5758b34c2369269985dee7df290322..3d6ba425dbb9f2f60f32900b0d107fb5e1797bfc 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), GFP_KERNEL); - if (!cpu_groups) + if (!cpu_groups) { + free_cpumask_var(tmp); return -ENOMEM; + } cpumask_copy(tmp, cpu_online_mask); @@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) topology_core_cpumask(cpumask_any(tmp)); if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { + free_cpumask_var(tmp); free_cpu_groups(num_groups, &cpu_groups); return -ENOMEM; } @@ -196,13 +199,12 @@ static int hotplug_tests(void) if (!page_buf) goto out_free_cpu_groups; - err = 0; /* * Of course the last CPU 
cannot be powered down and cpu_down() should * refuse doing that. */ pr_info("Trying to turn off and on again all CPUs\n"); - err += down_and_up_cpus(cpu_online_mask, offlined_cpus); + err = down_and_up_cpus(cpu_online_mask, offlined_cpus); /* * Take down CPUs by cpu group this time. When the last CPU is turned diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8f5229da95bbaf6fcc9fc55349d27..6945c3c966375a2a7c96991d3094fad5dfd194c7 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index b0c31789a9096b81e7188397cb9ae6e3017afff9..3fa2c59921733a706d32e25d2597e3a29d292fed 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev) * on this port and minimum soft reset pulse width has elapsed. * Driver polls port_soft_reset_ack to determine if reset done by HW. */ - if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST, + if (readq_poll_timeout(base + PORT_HDR_CTRL, v, + v & PORT_CTRL_SFTRST_ACK, RST_POLL_INVL, RST_POLL_TIMEOUT)) { dev_err(&pdev->dev, "timeout, fail to reset device\n"); return -ETIMEDOUT; diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 538755062ab7ca7d8fee6911ca2af02a8afc7a37..a78c409bf2c4490598c3a8838b37d6c45bb3348e 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) { struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); struct dfl_fpga_cdev *cdev = drvdata->cdev; - int ret = 0; if (!num_vfs) { /* @@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) dfl_fpga_cdev_config_ports_pf(cdev); } else { + int ret; + /* * before enable SRIOV, put released ports into VF access mode * first of all. 
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c index 5640efe5e75049a3c3b44f759126dd5bab740181..5bda38e0780f24badccd5167c0db701b2e02640e 100644 --- a/drivers/gpio/gpio-arizona.c +++ b/drivers/gpio/gpio-arizona.c @@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } @@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset) if (ret < 0) { dev_err(chip->parent, "Failed to drop cache: %d\n", ret); + pm_runtime_put_autosuspend(chip->parent); return ret; } ret = regmap_read(arizona->regmap, reg, &val); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(chip->parent); return ret; + } pm_runtime_mark_last_busy(chip->parent); pm_runtime_put_autosuspend(chip->parent); @@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip, ret = pm_runtime_get_sync(chip->parent); if (ret < 0) { dev_err(chip->parent, "Failed to resume: %d\n", ret); + pm_runtime_put(chip->parent); return ret; } } diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 1fca8dd7824fdf592efd4f7d01e57503284e815d..a3b9bdedbe443503778ece0a4f85f4a47187dd6c 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = { }; MODULE_DEVICE_TABLE(i2c, pca953x_id); +#ifdef CONFIG_GPIO_PCA953X_IRQ + +#include +#include +#include + +static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = { + { + /* + * On Intel Galileo Gen 2 board the IRQ pin of one of + * the I²C GPIO expanders, which has GpioInt() resource, + * is provided as an absolute number instead of being + * relative. Since first controller (gpio-sch.c) and + * second (gpio-dwapb.c) are at the fixed bases, we may + * safely refer to the number in the global space to get + * an IRQ out of it. 
+ */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"), + }, + }, + {} +}; + +#ifdef CONFIG_ACPI +static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data) +{ + struct acpi_resource_gpio *agpio; + int *pin = data; + + if (acpi_gpio_get_irq_resource(ares, &agpio)) + *pin = agpio->pin_table[0]; + return 1; +} + +static int pca953x_acpi_find_pin(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + int pin = -ENOENT, ret; + LIST_HEAD(r); + + ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin); + acpi_dev_free_resource_list(&r); + if (ret < 0) + return ret; + + return pin; +} +#else +static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; } +#endif + +static int pca953x_acpi_get_irq(struct device *dev) +{ + int pin, ret; + + pin = pca953x_acpi_find_pin(dev); + if (pin < 0) + return pin; + + dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin); + + if (!gpio_is_valid(pin)) + return -EINVAL; + + ret = gpio_request(pin, "pca953x interrupt"); + if (ret) + return ret; + + ret = gpio_to_irq(pin); + + /* When pin is used as an IRQ, no need to keep it requested */ + gpio_free(pin); + + return ret; +} +#endif + static const struct acpi_device_id pca953x_acpi_ids[] = { { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, { } @@ -322,6 +400,7 @@ static const struct regmap_config pca953x_ai_i2c_regmap = { .writeable_reg = pca953x_writeable_register, .volatile_reg = pca953x_volatile_register, + .disable_locking = true, .cache_type = REGCACHE_RBTREE, .max_register = 0x7f, }; @@ -623,8 +702,6 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) DECLARE_BITMAP(reg_direction, MAX_LINE); int level; - pca953x_read_regs(chip, chip->regs->direction, reg_direction); - if (chip->driver_data & PCA_PCAL) { /* Enable latch on interrupt-enabled inputs */ pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask); @@ -635,7 +712,11 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask); } + /* Switch direction to input if needed */ + pca953x_read_regs(chip, chip->regs->direction, reg_direction); + bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio); + bitmap_complement(reg_direction, reg_direction, gc->ngpio); bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio); /* Look for any newly setup interrupt */ @@ -734,14 +815,16 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) struct gpio_chip *gc = &chip->gpio_chip; DECLARE_BITMAP(pending, MAX_LINE); int level; + bool ret; - if (!pca953x_irq_pending(chip, pending)) - return IRQ_NONE; + mutex_lock(&chip->i2c_lock); + ret = pca953x_irq_pending(chip, pending); + mutex_unlock(&chip->i2c_lock); for_each_set_bit(level, pending, gc->ngpio) handle_nested_irq(irq_find_mapping(gc->irq.domain, level)); - return IRQ_HANDLED; + return IRQ_RETVAL(ret); } static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base) @@ -752,6 +835,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base) DECLARE_BITMAP(irq_stat, MAX_LINE); int ret; + if (dmi_first_match(pca953x_dmi_acpi_irq_info)) { + ret = pca953x_acpi_get_irq(&client->dev); + if (ret > 0) + client->irq = ret; + } + if (!client->irq) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index d33cb344be69f5266b4933137151ffa5feb5117a..a414da22a359c03406e31a28db84867016cc0049 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1295,27 +1295,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) { struct amdgpu_job *job; - struct drm_sched_job *s_job; + struct drm_sched_job *s_job, *tmp; uint32_t preempt_seq; struct dma_fence *fence, **ptr; struct amdgpu_fence_driver *drv = &ring->fence_drv; struct drm_gpu_scheduler *sched = &ring->sched; + bool preempted = true; if (ring->funcs->type != AMDGPU_RING_TYPE_GFX) return; preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2)); - if (preempt_seq <= atomic_read(&drv->last_seq)) - return; + if (preempt_seq <= atomic_read(&drv->last_seq)) { + preempted = false; + goto no_preempt; + } preempt_seq &= drv->num_fences_mask; ptr = &drv->fences[preempt_seq]; fence = rcu_dereference_protected(*ptr, 1); +no_preempt: spin_lock(&sched->job_list_lock); - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + if (dma_fence_is_signaled(&s_job->s_fence->finished)) { + /* remove job from ring_mirror_list */ + list_del_init(&s_job->node); + sched->ops->free_job(s_job); + continue; + } job = to_amdgpu_job(s_job); - if (job->fence == fence) + if (preempted && job->fence == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 47207188c5692ad068217793a957471e13c3a035..4fb4c3b696876869d3128333b47e6d937e117e68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,7 +37,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) memset(&ti, 0, sizeof(struct amdgpu_task_info)); - if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { + if (amdgpu_gpu_recovery && + amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", s_job->sched->name); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d7e17e34fee171a2d103fe79a2fc27ac9d68c95c..21292098bc023d09b29470b20b66261510eba765 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? 
-EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device dev_info; uint64_t vm_size; + memset(&dev_info, 0, sizeof(dev_info)); dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; dev_info.external_rev = adev->external_rev_id; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 7301fdcfb8bce2ded20f147dbd80111af2349858..ef3269c43d4f49ab3c8dd652f519ee6dbe147342 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -372,6 +372,52 @@ static int psp_tmr_load(struct psp_context *psp) return ret; } +static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp, + struct psp_gfx_cmd_resp *cmd) +{ + if (amdgpu_sriov_vf(psp->adev)) + cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR; + else + cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR; +} + +static int psp_tmr_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_tmr_unload_cmd_buf(psp, cmd); + DRM_INFO("free PSP TMR buffer\n"); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_tmr_terminate(struct psp_context *psp) +{ + int ret; + void *tmr_buf; + void **pptr; + + ret = psp_tmr_unload(psp); + if (ret) + return ret; + + /* free TMR memory buffer */ + pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL; + amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); + + return 0; +} + static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, uint64_t asd_mc, uint32_t size) { @@ -1779,8 +1825,6 @@ static int psp_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - void *tmr_buf; - void **pptr; if (psp->adev->psp.ta_fw) { psp_ras_terminate(psp); @@ -1790,10 +1834,9 @@ static int psp_hw_fini(void *handle) psp_asd_unload(psp); + psp_tmr_terminate(psp); psp_ring_destroy(psp, PSP_RING_TYPE__KM); - pptr = amdgpu_sriov_vf(psp->adev) ? 
&tmr_buf : NULL; - amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr); amdgpu_bo_free_kernel(&psp->fw_pri_bo, &psp->fw_pri_mc_addr, &psp->fw_pri_buf); amdgpu_bo_free_kernel(&psp->fence_buf_bo, @@ -1840,6 +1883,18 @@ static int psp_suspend(void *handle) } } + ret = psp_asd_unload(psp); + if (ret) { + DRM_ERROR("Failed to unload asd\n"); + return ret; + } + + ret = psp_tmr_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate tmr\n"); + return ret; + } + ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); if (ret) { DRM_ERROR("PSP ring stop failed\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index bd5dd4f6431103b8fded2463a37717870da6f6f8..fac77a86c04b2e35866cc7aa1c99dc7ea20de943 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7513,12 +7513,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq; struct amdgpu_ring *kiq_ring = &kiq->ring; + unsigned long flags; if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; - if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); return -ENOMEM; + } /* assert preemption condition */ amdgpu_ring_set_preempt_cond_exec(ring, false); @@ -7529,6 +7534,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) ++ring->trail_seq); amdgpu_ring_commit(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + /* poll the trailing fence */ for (i = 0; i < adev->usec_timeout; i++) { if (ring->trail_seq == diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 5d71c23e264076a90084b41e7c96e000e44fc5eb..8fb66e50a57b4561560b61eafd0a97a410ce4fe6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -314,30 +314,20 @@ static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring) static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u64 *wptr = NULL; - uint64_t local_wptr = 0; + u64 wptr; if (ring->use_doorbell) { /* XXX check if swapping is necessary on BE */ - wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]); - DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr); - *wptr = (*wptr) >> 2; - DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr); + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); } else { - u32 lowbit, highbit; - - wptr = &local_wptr; - lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; - highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; - - DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", - ring->me, highbit, lowbit); - *wptr = highbit; - *wptr = (*wptr) << 32; - *wptr |= lowbit; + wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); + wptr = wptr << 32; + wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); } - return *wptr; + return wptr >> 2; } /** diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index
10ac8076d4f244a80e91dd05733b426ab0ebf31f..710edc70e37ece305d614dce67017353b92c65b1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -974,6 +974,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) /* Update the actual used number of crtc */ adev->mode_info.num_crtc = adev->dm.display_indexes_num; + /* create fake encoders for MST */ + dm_dp_create_fake_mst_encoders(adev); + /* TODO: Add_display_info? */ /* TODO use dynamic cursor width */ @@ -997,6 +1000,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) static void amdgpu_dm_fini(struct amdgpu_device *adev) { + int i; + + for (i = 0; i < adev->dm.display_indexes_num; i++) { + drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); + } + amdgpu_dm_audio_fini(adev); amdgpu_dm_destroy_drm_device(&adev->dm); @@ -1358,7 +1367,7 @@ static int dm_late_init(void *handle) struct dmcu *dmcu = NULL; bool ret; - if (!adev->dm.fw_dmcu) + if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw) return detect_mst_link_for_all_connectors(adev->ddev); dmcu = adev->dm.dc->res_pool->dmcu; @@ -2010,6 +2019,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) struct amdgpu_display_manager *dm; struct drm_connector *conn_base; struct amdgpu_device *adev; + struct dc_link *link = NULL; static const u8 pre_computed_values[] = { 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; @@ -2017,6 +2027,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) if (!aconnector || !aconnector->dc_link) return; + link = aconnector->dc_link; + if (link->connector_signal != SIGNAL_TYPE_EDP) + return; + conn_base = &aconnector->base; adev = conn_base->dev->dev_private; dm = &adev->dm; @@ -8703,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * the same resource. If we have a new DC context as part of * the DM atomic state from validation we need to free it and * retain the existing one instead. + * + * Furthermore, since the DM atomic state only contains the DC + * context and can safely be annulled, we can free the state + * and clear the associated private object now to free + * some memory and avoid a possible use-after-free later. */ - struct dm_atomic_state *new_dm_state, *old_dm_state; - new_dm_state = dm_atomic_get_new_state(state); - old_dm_state = dm_atomic_get_old_state(state); + for (i = 0; i < state->num_private_objs; i++) { + struct drm_private_obj *obj = state->private_objs[i].ptr; - if (new_dm_state && old_dm_state) { - if (new_dm_state->context) - dc_release_state(new_dm_state->context); + if (obj->funcs == adev->dm.atomic_obj.funcs) { + int j = state->num_private_objs-1; - new_dm_state->context = old_dm_state->context; + dm_atomic_destroy_state(obj, + state->private_objs[i].state); + + /* If i is not at the end of the array then the + * last element needs to be moved to where i was + * before the array can safely be truncated. 
+ */ + if (i != j) + state->private_objs[i] = + state->private_objs[j]; - if (old_dm_state->context) - dc_retain_state(old_dm_state->context); + state->private_objs[j].ptr = NULL; + state->private_objs[j].state = NULL; + state->private_objs[j].old_state = NULL; + state->private_objs[j].new_state = NULL; + + state->num_private_objs = j; + break; + } } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d61186ff411d8535708db1abc6f747256e9c16d0..648180ccdc2ebe748375661a8f33606353f65315 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -43,6 +43,9 @@ */ #define AMDGPU_DM_MAX_DISPLAY_INDEX 31 + +#define AMDGPU_DM_MAX_CRTC 6 + /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -328,6 +331,13 @@ struct amdgpu_display_manager { * available in FW */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; + + /** + * @mst_encoders: + * + * fake encoders used for DP MST. + */ + struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; }; struct amdgpu_dm_connector { @@ -356,7 +366,6 @@ struct amdgpu_dm_connector { struct amdgpu_dm_dp_aux dm_dp_aux; struct drm_dp_mst_port *port; struct amdgpu_dm_connector *mst_port; - struct amdgpu_encoder *mst_encoder; struct drm_dp_aux *dsc_aux; /* TODO see if we can merge with ddc_bus or make a dm_connector */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index ae0a7ef1d595a6e226754a007e6c1f163cad254b..e5ecc5affa1eb7c46eb2005afa09d2c7b1aca548 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -95,7 +95,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder; if (aconnector->dc_sink) { dc_link_remove_remote_sink(aconnector->dc_link, @@ -105,8 +104,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) kfree(aconnector->edid); - drm_encoder_cleanup(&amdgpu_encoder->base); - kfree(amdgpu_encoder); drm_connector_cleanup(connector); drm_dp_mst_put_port_malloc(aconnector->port); kfree(aconnector); @@ -243,7 +240,11 @@ static struct drm_encoder * dm_mst_atomic_best_encoder(struct drm_connector *connector, struct drm_connector_state *connector_state) { - return &to_amdgpu_dm_connector(connector)->mst_encoder->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); + + return &adev->dm.mst_encoders[acrtc->crtc_id].base; } static int @@ -306,31 +307,27 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { .destroy = amdgpu_dm_encoder_destroy, }; -static struct amdgpu_encoder * -dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) +void +dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) { - struct drm_device *dev = connector->base.dev; - struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_encoder *amdgpu_encoder; - struct drm_encoder *encoder; - - amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); - if (!amdgpu_encoder) - return NULL; + struct drm_device *dev = adev->ddev; + int i; - encoder = &amdgpu_encoder->base; - encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); + for (i = 0; i < 
adev->dm.display_indexes_num; i++) { + struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; + struct drm_encoder *encoder = &amdgpu_encoder->base; - drm_encoder_init( - dev, - &amdgpu_encoder->base, - &amdgpu_dm_encoder_funcs, - DRM_MODE_ENCODER_DPMST, - NULL); + encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); - drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); + drm_encoder_init( + dev, + &amdgpu_encoder->base, + &amdgpu_dm_encoder_funcs, + DRM_MODE_ENCODER_DPMST, + NULL); - return amdgpu_encoder; + drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); + } } static struct drm_connector * @@ -343,6 +340,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + int i; aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) @@ -369,9 +367,10 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, master->dc_link, master->connector_id); - aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master); - drm_connector_attach_encoder(&aconnector->base, - &aconnector->mst_encoder->base); + for (i = 0; i < adev->dm.display_indexes_num; i++) { + drm_connector_attach_encoder(&aconnector->base, + &adev->dm.mst_encoders[i].base); + } connector->max_bpc_property = master->base.max_bpc_property; if (connector->max_bpc_property) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index d2c56579a2cc412a3074ba0a089f29b891492c77..b38bd68121ceb56cae8fbb847ff7852d08bff03e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -35,6 +35,9 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, int link_index); +void +dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); + #if defined(CONFIG_DRM_AMD_DC_DCN) bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 4f0e7203dba4f41fd6fa4419b11f99aefc7d97c1..470c82794f6f810eee75c7b3be05b0ba324bf055 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -56,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) } } -static void dc_stream_construct(struct dc_stream_state *stream, +static bool dc_stream_construct(struct dc_stream_state *stream, struct dc_sink *dc_sink_data) { uint32_t i = 0; @@ -118,11 +118,17 @@ static void dc_stream_construct(struct dc_stream_state *stream, update_stream_signal(stream, dc_sink_data); stream->out_transfer_func = dc_create_transfer_func(); + if (stream->out_transfer_func == NULL) { + dc_sink_release(dc_sink_data); + return false; + } stream->out_transfer_func->type = TF_TYPE_BYPASS; stream->out_transfer_func->ctx = stream->ctx; stream->stream_id = stream->ctx->dc_stream_id_count; stream->ctx->dc_stream_id_count++; + + return true; } static void dc_stream_destruct(struct dc_stream_state *stream) @@ -164,13 +170,20 @@ struct dc_stream_state *dc_create_stream_for_sink( stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); if (stream == NULL) - return NULL; + goto alloc_fail; - dc_stream_construct(stream, sink); + if 
(dc_stream_construct(stream, sink) == false) + goto construct_fail; kref_init(&stream->refcount); return stream; + +construct_fail: + kfree(stream); + +alloc_fail: + return NULL; } struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 67476047c067d1d0f99b7ecba83ca2a00ed90dca..fbb3f3a0dff73f9324f1ae12e83daec8269d35c2 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -689,7 +689,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u return -EINVAL; } - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, 1 << workload_type, NULL); if (ret) { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 3da71a088b925f0d93a37661366b3c0b5fbaadfb..0ecc18b55ffb2e304e0daf4638e2e0b964aa7725 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, /* sclk is bigger than max sclk in the dependence table */ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; - vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), - (dep_table->entries[i - 1].vddc - - (uint16_t)VDDC_VDDCI_DELTA)); if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) *voltage |= (data->vbios_boot_state.vddci_bootup_value * @@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, else if (dep_table->entries[i - 1].vddci) *voltage |= (dep_table->entries[i - 1].vddci * VOLTAGE_SCALE) << VDDC_SHIFT; - else + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i - 1].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 6b27242b9ee3c1a55b856bb58d456c5307e534a3..bca3fcff16ec6ee5d9fe29c252c962444efa2a6f 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -173,8 +173,6 @@ static int aspeed_gfx_load(struct drm_device *drm) drm_mode_config_reset(drm); - drm_fbdev_generic_setup(drm, 32); - return 0; } @@ -225,6 +223,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev) if (ret) goto err_unload; + drm_fbdev_generic_setup(&priv->drm, 32); return 0; err_unload: diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 05d8373888e81d48250c618f2e821555436062c1..079f46f5cdb62a8e7989f58e3d8d686f3c16a61b 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; bochs->dev->mode_config.prefer_shadow_fbdev = 1; + bochs->dev->mode_config.fbdev_use_iomem = true; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 
87b58c1acff4af5ef61de7a45197b2d05bec46d4..648eb23d078481069e54fa30ed42433b4f749464 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) adv7511->bridge.funcs = &adv7511_bridge_funcs; adv7511->bridge.of_node = dev->of_node; + adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA; drm_bridge_add(&adv7511->bridge); diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index b14d725bf609d55c46aa07f42d291c7b9401de20..c7bc194bbce37f4cbe12a3afac0e99b54e43fe4e 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, struct drm_panel *panel; int ret; - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } - ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel, &panel_bridge); if (ret) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5609e164805fa7077450eb78b34ab23e3692f1e2..89cfd68ef4000052fee2d836336e2ee5e96ab887 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, unsigned int y; for (y = clip->y1; y < clip->y2; y++) { - memcpy(dst, src, len); + if (!fb_helper->dev->mode_config.fbdev_use_iomem) + memcpy(dst, src, len); + else + memcpy_toio((void __iomem *)dst, src, len); + src += fb->pitches[0]; dst += fb->pitches[0]; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 7bf628e130232e69a2ba0674ea2843f4e17ebd6a..ee2058ad482c492f08c2b2655ebfde8c06bb6359 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -871,9 +871,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, @@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); if (ret) - return ret; + goto err; args->handle = handle; args->size = obj->size; - return 0; +err: + drm_gem_object_put_unlocked(obj); + return ret; } /** diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index bb27c82757f17caa3b29c58b75438b518eaf3d6e..bf7888ad9ad4c2a3d56f3ca18bc6131fd3601883 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc, } } - tr.len = chunk; + tr.len = chunk * 2; len -= chunk; ret = spi_sync(spi, &m); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index b50b44e7627975522b33b76f2d854c3fe2f3eb2f..8fc3f67e3e760e39e057fc7c2a7052d5408f287e 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type( * configurations by passing the endpoints explicitly to * drm_of_lvds_get_dual_link_pixel_order(). 
*/ - if (!current_pt || pixels_type != current_pt) { - of_node_put(remote_port); + if (!current_pt || pixels_type != current_pt) return -EINVAL; - } } return pixels_type; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index a6fd0c29e5b89cc5c657cb2c81c8b8a165f10ca9..544b9993c99ed5fc2ca68c3bc53fd63748c3a6ea 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -307,8 +307,6 @@ static int hibmc_load(struct drm_device *dev) /* reset all the states of crtc/plane/encoder/connector */ drm_mode_config_reset(dev); - drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); - return 0; err: @@ -355,6 +353,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev, ret); goto err_unload; } + + drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); + return 0; err_unload: diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 9ea1a397d1b54e813822a42eb5cf05a85b7fe876..26996e1839e2232637c331111666c6422e3f810e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3822,6 +3822,17 @@ skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, return true; } +unsigned int +intel_plane_fence_y_offset(const struct intel_plane_state *plane_state) +{ + int x = 0, y = 0; + + intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, + plane_state->color_plane[0].offset, 0); + + return y; +} + static int skl_check_main_surface(struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index efb4da205ea292e7a7cecb819249bc1694e66223..3a06f72c985965818cc2068f3e77538de33c0621 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -608,6 +608,7 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation); int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); +unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); struct intel_display_error_state * intel_display_capture_error_state(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 1c26673acb2dd8bd603c427a1eddae38b86761e6..412572f88b67bbf9ebcdaff4dbd3c2b08175ee6d 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -47,19 +47,6 @@ #include "intel_fbc.h" #include "intel_frontbuffer.h" -/* - * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the - * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's - * origin so the x and y offsets can actually fit the registers. As a - * consequence, the fence doesn't really start exactly at the display plane - * address we program because it starts at the real start of the buffer, so we - * have to take this into consideration here. - */ -static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc) -{ - return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y; -} - /* * For SKL+, the plane source size used by the hardware is based on the value we * write to the PLANE_SIZE register. 
For BDW-, the hardware looks at the value @@ -141,7 +128,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) fbc_ctl2 |= FBC_CTL_CPU_FENCE; intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); intel_de_write(dev_priv, FBC_FENCE_OFF, - params->crtc.fence_y_offset); + params->fence_y_offset); } /* enable it... */ @@ -175,7 +162,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) if (params->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; intel_de_write(dev_priv, DPFC_FENCE_YOFF, - params->crtc.fence_y_offset); + params->fence_y_offset); } else { intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); } @@ -243,7 +230,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, SNB_DPFC_CTL_SA, SNB_CPU_FENCE_ENABLE | params->fence_id); intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->crtc.fence_y_offset); + params->fence_y_offset); } } else { if (IS_GEN(dev_priv, 6)) { @@ -253,7 +240,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) } intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, - params->crtc.fence_y_offset); + params->fence_y_offset); /* enable it... */ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -320,7 +307,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, SNB_DPFC_CTL_SA, SNB_CPU_FENCE_ENABLE | params->fence_id); intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->crtc.fence_y_offset); + params->fence_y_offset); } else if (dev_priv->ggtt.num_fences) { intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); @@ -631,8 +618,8 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv, /* * For some reason, the hardware tracking starts looking at whatever we * programmed as the display plane base address register. It does not look at - * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y} - * variables instead of just looking at the pipe/plane size. + * the X and Y offset registers. That's why we include the src x/y offsets + * instead of just looking at the plane size. 
*/ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) { @@ -705,7 +692,6 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16; cache->plane.adjusted_x = plane_state->color_plane[0].x; cache->plane.adjusted_y = plane_state->color_plane[0].y; - cache->plane.y = plane_state->uapi.src.y1 >> 16; cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; @@ -713,6 +699,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->fb.stride = fb->pitches[0]; cache->fb.modifier = fb->modifier; + cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); + drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && !plane_state->vma->fence); @@ -731,6 +719,25 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) fbc->compressed_fb.size * fbc->threshold; } +static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && + cache->fb.modifier != I915_FORMAT_MOD_X_TILED) + return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + else + return 0; +} + +static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv); +} + static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; @@ -883,12 +890,13 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, memset(params, 0, sizeof(*params)); params->fence_id = cache->fence_id; + params->fence_y_offset = cache->fence_y_offset; params->crtc.pipe = crtc->pipe; params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; - params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc); params->fb.format = cache->fb.format; + params->fb.modifier = cache->fb.modifier; params->fb.stride = cache->fb.stride; params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); @@ -918,6 +926,9 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) if (params->fb.format != cache->fb.format) return false; + if (params->fb.modifier != cache->fb.modifier) + return false; + if (params->fb.stride != cache->fb.stride) return false; @@ -1197,7 +1208,8 @@ void intel_fbc_enable(struct intel_atomic_state *state, if (fbc->crtc) { if (fbc->crtc != crtc || - !intel_fbc_cfb_size_changed(dev_priv)) + (!intel_fbc_cfb_size_changed(dev_priv) && + !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv))) goto out; __intel_fbc_disable(dev_priv); @@ -1219,12 +1231,7 @@ void intel_fbc_enable(struct intel_atomic_state *state, goto out; } - if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && - plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) - cache->gen9_wa_cfb_stride = - DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; - else - cache->gen9_wa_cfb_stride = 0; + cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv); drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 010f3724071031b565d946ae91e9ddbeb2e4c472..95b6d94579101232b2a23980cf44fb1f8a932971 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ 
-2867,19 +2867,13 @@ intel_hdmi_connector_register(struct drm_connector *connector) return ret; } -static void intel_hdmi_destroy(struct drm_connector *connector) +static void intel_hdmi_connector_unregister(struct drm_connector *connector) { struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; cec_notifier_conn_unregister(n); - intel_connector_destroy(connector); -} - -static void intel_hdmi_connector_unregister(struct drm_connector *connector) -{ intel_hdmi_remove_i2c_symlink(connector); - intel_connector_unregister(connector); } @@ -2891,7 +2885,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_hdmi_connector_register, .early_unregister = intel_hdmi_connector_unregister, - .destroy = intel_hdmi_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index e4aece20bc8089b0cff67256c12f0e7153ea2294..52db2bde44a3ab279b433847511c49afa901381c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring) { int err; - err = i915_active_acquire(&ring->vma->active); + err = intel_ring_pin(ring); if (err) return err; - err = intel_ring_pin(ring); + err = i915_active_acquire(&ring->vma->active); if (err) - goto err_active; + goto err_pin; return 0; -err_active: - i915_active_release(&ring->vma->active); +err_pin: + intel_ring_unpin(ring); return err; } static void __ring_retire(struct intel_ring *ring) { - intel_ring_unpin(ring); i915_active_release(&ring->vma->active); + intel_ring_unpin(ring); } __i915_active_call diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7c3d8ef4a47ce7ad43b13987de2bb491b8149ba6..cb07e1d2a353e172e321d1f3110c7c63fe257be0 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -5396,13 +5396,8 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve) * typically be the first we inspect for submission. 
*/ swp = prandom_u32_max(ve->num_siblings); - if (!swp) - return; - - swap(ve->siblings[swp], ve->siblings[0]); - if (!intel_engine_has_relative_mmio(ve->siblings[0])) - virtual_update_register_offsets(ve->context.lrc_reg_state, - ve->siblings[0]); + if (swp) + swap(ve->siblings[swp], ve->siblings[0]); } static int virtual_context_alloc(struct intel_context *ce) @@ -5415,15 +5410,9 @@ static int virtual_context_alloc(struct intel_context *ce) static int virtual_context_pin(struct intel_context *ce) { struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - int err; /* Note: we must use a real engine class for setting up reg state */ - err = __execlists_context_pin(ce, ve->siblings[0]); - if (err) - return err; - - virtual_engine_initial_hint(ve); - return 0; + return __execlists_context_pin(ce, ve->siblings[0]); } static void virtual_context_enter(struct intel_context *ce) @@ -5688,6 +5677,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); intel_engine_init_breadcrumbs(&ve->base); intel_engine_init_execlists(&ve->base); + ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ ve->base.cops = &virtual_context_ops; ve->base.request_alloc = execlists_request_alloc; @@ -5769,6 +5759,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, ve->base.flags |= I915_ENGINE_IS_VIRTUAL; + virtual_engine_initial_hint(ve); return &ve->context; err_put: diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 5049c3dd08a6a6d11f709371e1f815b7e8f382b4..c91981e75ebf7c2d4e453be0a74a7144bc65057e 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -44,9 +44,9 @@ static int cmp_u64(const void *A, const void *B) { const u64 *a = A, *b = B; - if (a < b) + if (*a < *b) return -1; - else if (a > b) + else if (*a > *b) return 1; else return 0; @@ -56,9 +56,9 @@ static int cmp_u32(const void *A, const void *B) { const u32 *a = A, *b = B; - if (a < b) + if (*a < *b) return -1; - else if (a > b) + else if (*a > *b) return 1; else return 0; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index bca036ac662129414d38168f17fd85db68b684e8..e7532e7d74e91bbbddfe0b76ad630a1b1a35d430 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -230,7 +230,7 @@ static int per_file_stats(int id, void *ptr, void *data) struct file_stats *stats = data; struct i915_vma *vma; - if (!kref_get_unless_zero(&obj->base.refcount)) + if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount)) return 0; stats->count++; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index adb9bf34cf97a3bb21690c8ec73bbeb382a0f7b3..ae99a91902002ff0f006d2e4ebdd5f63c2e496df 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -410,8 +410,6 @@ struct intel_fbc { int adjusted_x; int adjusted_y; - int y; - u16 pixel_blend_mode; } plane; @@ -420,6 +418,8 @@ struct intel_fbc { unsigned int stride; u64 modifier; } fb; + + unsigned int fence_y_offset; u16 gen9_wa_cfb_stride; s8 fence_id; } state_cache; @@ -435,15 +435,16 @@ struct intel_fbc { struct { enum pipe pipe; enum i9xx_plane_id i9xx_plane; - unsigned int fence_y_offset; } crtc; struct { const struct drm_format_info *format; unsigned int stride; + u64 modifier; } fb; int cfb_size; + unsigned int fence_y_offset; u16 gen9_wa_cfb_stride; s8 fence_id; bool 
plane_visible; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 25329b7600c958ffff35545ac18c658f6beb7c8b..014f34c047d57eeefd37a3e064363e90bdbca7cc 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1592,6 +1592,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, u32 d; cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; + cmd |= MI_SRM_LRM_GLOBAL_GTT; if (INTEL_GEN(stream->perf->i915) >= 8) cmd++; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index fc14ebf9a0b709ce029d5409016954156184e537..1f9cd33b35cb33afa3cd90d02f8b433398af8f52 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *view) { + struct i915_vma *pos = ERR_PTR(-E2BIG); struct i915_vma *vma; struct rb_node *rb, **p; @@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj, rb = NULL; p = &obj->vma.tree.rb_node; while (*p) { - struct i915_vma *pos; long cmp; rb = *p; @@ -196,16 +196,12 @@ vma_create(struct drm_i915_gem_object *obj, * and dispose of ours. */ cmp = i915_vma_compare(pos, vm, view); - if (cmp == 0) { - spin_unlock(&obj->vma.lock); - i915_vma_free(vma); - return pos; - } - if (cmp < 0) p = &rb->rb_right; - else + else if (cmp > 0) p = &rb->rb_left; + else + goto err_unlock; } rb_link_node(&vma->obj_node, rb, p); rb_insert_color(&vma->obj_node, &obj->vma.tree); @@ -228,8 +224,9 @@ vma_create(struct drm_i915_gem_object *obj, err_unlock: spin_unlock(&obj->vma.lock); err_vma: + i915_vm_put(vm); i915_vma_free(vma); - return ERR_PTR(-E2BIG); + return pos; } static struct i915_vma * diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c index 33f01383409c0e80f8f0fd8805ce06f35cb02a77..a5c95bed08c09c580795b67327295d40357dddec 100644 --- a/drivers/gpu/drm/lima/lima_pp.c +++ b/drivers/gpu/drm/lima/lima_pp.c @@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip) int lima_pp_bcast_resume(struct lima_ip *ip) { + /* PP has been reset by individual PP resume */ + ip->data.async_reset = false; return 0; } diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index 08802e5177f6da23a5e539d0ea74fe13da1006f0..4d2290f88edb5e74a3efe260de3b3d009f9af898 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe, */ if (fb) { mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0)); - if (!mcde->video_mode) - /* Send a single frame using software sync */ - mcde_display_send_one_frame(mcde); + if (!mcde->video_mode) { + /* + * Send a single frame using software sync if the flow + * is not active yet. 
+ */ + if (mcde->flow_active == 0) + mcde_display_send_one_frame(mcde); + } dev_info_once(mcde->dev, "sent first display update\n"); } else { /* diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index c420f5a3d33b9fc452a49e3bdacafe7210e43d3d..aa74aac3cbccddedd4b4ac2abd267ea9c9cbff5a 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -6,12 +6,12 @@ config DRM_MEDIATEK depends on COMMON_CLK depends on HAVE_ARM_SMCCC depends on OF + depends on MTK_MMSYS select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL select MEMORY - select MTK_MMSYS select MTK_SMI select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index fe46c4bac64d77a1cdeb0e52509d45f6d8384db8..7cd8f415fd029e368b2348cbe8915dc995ae6515 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -193,7 +193,6 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) int ret; int i; - DRM_DEBUG_DRIVER("%s\n", __func__); for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk); if (ret) { @@ -213,7 +212,6 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc) { int i; - DRM_DEBUG_DRIVER("%s\n", __func__); for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk); } @@ -258,7 +256,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) int ret; int i; - DRM_DEBUG_DRIVER("%s\n", __func__); if (WARN_ON(!crtc->state)) return -EINVAL; @@ -299,7 +296,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) goto err_mutex_unprepare; } - DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n"); for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) { mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev, mtk_crtc->ddp_comp[i]->id, @@ -349,7 +345,6 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) struct drm_crtc *crtc = &mtk_crtc->base; int i; - DRM_DEBUG_DRIVER("%s\n", __func__); for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); if (i == 1) @@ -831,7 +826,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, #if IS_REACHABLE(CONFIG_MTK_CMDQ) mtk_crtc->cmdq_client = - cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base), + cmdq_mbox_create(mtk_crtc->mmsys_dev, + drm_crtc_index(&mtk_crtc->base), 2000); if (IS_ERR(mtk_crtc->cmdq_client)) { dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 6bd369434d9d63c07d1d3073616918f5840b0eca..040a8f393fe24da31e6bcee0f6fe69c6668d6318 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -444,7 +444,6 @@ static int mtk_drm_probe(struct platform_device *pdev) if (!private) return -ENOMEM; - private->data = of_device_get_match_data(dev); private->mmsys_dev = dev->parent; if (!private->mmsys_dev) { dev_err(dev, "Failed to get MMSYS device\n"); @@ -514,7 +513,8 @@ static int mtk_drm_probe(struct platform_device *pdev) goto err_node; } - ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); + ret = mtk_ddp_comp_init(dev->parent, node, comp, + comp_id, NULL); if (ret) { of_node_put(node); goto err_node; @@ -571,7 +571,6 @@ static int mtk_drm_sys_suspend(struct device *dev) int ret; ret = drm_mode_config_helper_suspend(drm); - 
DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); return ret; } @@ -583,7 +582,6 @@ static int mtk_drm_sys_resume(struct device *dev) int ret; ret = drm_mode_config_helper_resume(drm); - DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index c2bd683a87c82857c74e508833d56256b731160b..92141a19681b901afc4c2e55d583fdcac449c62c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -164,6 +164,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, true, true); } +static void mtk_plane_atomic_disable(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct mtk_plane_state *state = to_mtk_plane_state(plane->state); + + state->pending.enable = false; + wmb(); /* Make sure the above parameter is set before update */ + state->pending.dirty = true; +} + static void mtk_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -178,6 +188,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, if (!crtc || WARN_ON(!fb)) return; + if (!plane->state->visible) { + mtk_plane_atomic_disable(plane, old_state); + return; + } + gem = fb->obj[0]; mtk_gem = to_mtk_gem_obj(gem); addr = mtk_gem->dma_addr; @@ -200,16 +215,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, state->pending.dirty = true; } -static void mtk_plane_atomic_disable(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); - - state->pending.enable = false; - wmb(); /* Make sure the above parameter is set before update */ - state->pending.dirty = true; -} - static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mtk_plane_atomic_check, diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 270bf22c98feb06248672d0d89de28d2a7419c48..02ac55c13a80bbcf84d8488d93842660bb3d67ec 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -316,10 +316,7 @@ static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi) static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi) { - u32 tmp_reg1; - - tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON); - return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? 
true : false; + return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN; } static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 5feb760617cbdf86b1104b084b76d037c0119ca5..1eebe310470afa70a9d87453237686d9351cb7dd 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1630,8 +1630,6 @@ static int mtk_hdmi_audio_startup(struct device *dev, void *data) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mtk_hdmi_audio_enable(hdmi); return 0; @@ -1641,8 +1639,6 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - mtk_hdmi_audio_disable(hdmi); } @@ -1651,8 +1647,6 @@ mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s(%d)\n", __func__, enable); - if (enable) mtk_hdmi_hw_aud_mute(hdmi); else @@ -1665,8 +1659,6 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); - dev_dbg(dev, "%s\n", __func__); - memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len)); return 0; @@ -1766,7 +1758,6 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev) goto err_bridge_remove; } - dev_dbg(dev, "mediatek hdmi probe success\n"); return 0; err_bridge_remove: @@ -1789,7 +1780,7 @@ static int mtk_hdmi_suspend(struct device *dev) struct mtk_hdmi *hdmi = dev_get_drvdata(dev); mtk_hdmi_clk_disable_audio(hdmi); - dev_dbg(dev, "hdmi suspend success!\n"); + return 0; } @@ -1804,7 +1795,6 @@ static int mtk_hdmi_resume(struct device *dev) return ret; } - dev_dbg(dev, "hdmi resume success!\n"); return 0; } #endif diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index b55f5167520586d0f4fcd81ba7147c1b06a8f51a..827b93786facb137fc62b3b3c1dc14fa66fa2bba 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c @@ -107,60 +107,10 @@ #define RGS_HDMITX_5T1_EDG (0xf << 4) #define RGS_HDMITX_PLUG_TST BIT(0) -static const u8 PREDIV[3][4] = { - {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */ - {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */ - {0x1, 0x1, 0x1, 0x1} /* 148Mhz */ -}; - -static const u8 TXDIV[3][4] = { - {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */ - {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */ - {0x1, 0x0, 0x0, 0x0} /* 148Mhz */ -}; - -static const u8 FBKSEL[3][4] = { - {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */ - {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */ - {0x1, 0x0, 0x1, 0x1} /* 148Mhz */ -}; - -static const u8 FBKDIV[3][4] = { - {19, 24, 29, 19}, /* 27Mhz */ - {19, 24, 14, 19}, /* 74Mhz */ - {19, 24, 14, 19} /* 148Mhz */ -}; - -static const u8 DIVEN[3][4] = { - {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */ - {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */ - {0x2, 0x2, 0x2, 0x2} /* 148Mhz */ -}; - -static const u8 HTPLLBP[3][4] = { - {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */ - {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */ - {0xc, 0xf, 0xf, 0xc} /* 148Mhz */ -}; - -static const u8 HTPLLBC[3][4] = { - {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */ - {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */ - {0x2, 0x3, 0x3, 0x2} /* 148Mhz */ -}; - -static const u8 HTPLLBR[3][4] = { - {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */ - {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */ - {0x1, 0x2, 0x2, 0x1} /* 148Mhz */ -}; - static int mtk_hdmi_pll_prepare(struct clk_hw *hw) { struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - 
dev_dbg(hdmi_phy->dev, "%s\n", __func__); - mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN); mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN); @@ -178,8 +128,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw) { struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); - dev_dbg(hdmi_phy->dev, "%s\n", __func__); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN); mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN); usleep_range(100, 150); diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h index 8ea00546cd4e29187ee29ed9889505a258532a3e..049c4bfe2a3aefd0abde8a8a6d0fa6fd374259b1 100644 --- a/drivers/gpu/drm/meson/meson_registers.h +++ b/drivers/gpu/drm/meson/meson_registers.h @@ -261,6 +261,12 @@ #define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12) #define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22) #define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24) +#define VIU_OSD_BURST_LENGTH_24 (0x0 << 31 | 0x0 << 10) +#define VIU_OSD_BURST_LENGTH_32 (0x0 << 31 | 0x1 << 10) +#define VIU_OSD_BURST_LENGTH_48 (0x0 << 31 | 0x2 << 10) +#define VIU_OSD_BURST_LENGTH_64 (0x0 << 31 | 0x3 << 10) +#define VIU_OSD_BURST_LENGTH_96 (0x1 << 31 | 0x0 << 10) +#define VIU_OSD_BURST_LENGTH_128 (0x1 << 31 | 0x1 << 10) #define VD1_IF0_GEN_REG 0x1a50 #define VD1_IF0_CANVAS0 0x1a51 diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c index 304f8ff1339cb6c35acb5e239de5f08b4481ed5b..aede0c67a57f09c8effef544582e8659b0ec087e 100644 --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c @@ -411,13 +411,6 @@ void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv) priv->io_base + _REG(VIU_MISC_CTRL1)); } -static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length) -{ - uint32_t val = (((length & 0x80) % 24) / 12); - - return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31)); -} - void meson_viu_init(struct meson_drm *priv) { uint32_t reg; @@ -444,9 +437,9 @@ void meson_viu_init(struct meson_drm *priv) VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */ if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) - reg |= meson_viu_osd_burst_length_reg(32); + reg |= VIU_OSD_BURST_LENGTH_32; else - reg |= meson_viu_osd_burst_length_reg(64); + reg |= VIU_OSD_BURST_LENGTH_64; writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT)); writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT)); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index d472942102f50db0328e21ae8cb44eca2d986434..800b7757252e3c92a97af405dc2d34f245a652da 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -601,6 +601,9 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc) (0x0100 << nv_crtc->index), }; + if (!nv_encoder->audio) + return; + nv_encoder->audio = false; nvif_mthd(&disp->disp->object, 0, &args, sizeof(args)); @@ -2070,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) */ if (core->assign_windows) { core->func->wndw.owner(core); - core->func->update(core, interlock, false); + nv50_disp_atomic_commit_core(state, interlock); core->assign_windows = false; interlock[NV50_DISP_INTERLOCK_CORE] = 0; } @@ -2503,7 +2506,7 @@ nv50_display_create(struct drm_device *dev) if (disp->disp->object.oclass >= TU102_DISP) 
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers; else - if (disp->disp->object.oclass >= GF110_DISP) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) nouveau_display(dev)->format_modifiers = disp90xx_modifiers; else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 496c4621cc787dd69028a4d89705f202b13dca48..07373bbc2acf570e2e40ac8e4204fac704b42463 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm, uint32_t *tile_mode, uint8_t *kind) { + struct nouveau_display *disp = nouveau_display(drm->dev); BUG_ON(!tile_mode || !kind); if (modifier == DRM_FORMAT_MOD_LINEAR) { @@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm, * Extract the block height and kind from the corresponding * modifier fields. See drm_fourcc.h for details. */ + + if ((modifier & (0xffull << 12)) == 0ull) { + /* Legacy modifier. Translate to this dev's 'kind.' */ + modifier |= disp->format_modifiers[0] & (0xffull << 12); + } + *tile_mode = (uint32_t)(modifier & 0xF); *kind = (uint8_t)((modifier >> 12) & 0xFF); @@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, } } +static const u64 legacy_modifiers[] = { + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4), + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5), + DRM_FORMAT_MOD_INVALID +}; + static int nouveau_validate_decode_mod(struct nouveau_drm *drm, uint64_t modifier, @@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm, (disp->format_modifiers[mod] != modifier); mod++); - if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) - return -EINVAL; + if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) { + for (mod = 0; + (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) && + (legacy_modifiers[mod] != modifier); + mod++); + if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID) + return -EINVAL; + } nouveau_decode_mod(drm, modifier, tile_mode, kind); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index e5c230d9ae24ed2ccca95c4d25c6765ef26beca0..cc99938375087d5c21e4a0bd0bb77bac92315821 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -550,7 +550,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, *dma_addr)) goto out_free_page; - if (drm->dmem->migrate.copy_func(drm, page_size(spage), + if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr)) goto out_dma_unmap; } else { diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 3d11b84d4cf9f8a1e333a1f90efd44d98543525e..d5c23d1c20d88d3122eade13aa08e5b14e9ddcb6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb; struct nouveau_channel *chan; struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; int ret; mode_cmd.width = sizes->surface_width; @@ -590,6 +590,7 @@ nouveau_fbcon_init(struct drm_device *dev) 
drm_fb_helper_fini(&fbcon->helper); free: kfree(fbcon); + drm->fbcon = NULL; return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index ba9f9359c30e1a486d07e128f8b9bc6c2426a43d..6586d9d3987402e1daa9d3e371c8b95601d1a8b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -562,6 +562,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm, .end = notifier->notifier.interval_tree.last + 1, .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE, .hmm_pfns = hmm_pfns, + .dev_private_owner = drm->dev, }; struct mm_struct *mm = notifier->notifier.mm; int ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index dcf08249374a35dd1af04185bc3245a2ba18f0c3..dffcac249211c3c02667d5e239ffade0916162b7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c @@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type, { struct nvkm_ior *ior; - /* First preference is to reuse the OR that is currently armed - * on HW, if any, in order to prevent unnecessary switching. - */ - list_for_each_entry(ior, &outp->disp->ior, head) { - if (!ior->identity && !!ior->func->hda.hpd == hda && - !ior->asy.outp && ior->arm.outp == outp) - return nvkm_outp_acquire_ior(outp, user, ior); - } - /* Failing that, a completely unused OR is the next best thing. */ list_for_each_entry(ior, &outp->disp->ior, head) { if (!ior->identity && !!ior->func->hda.hpd == hda && @@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda) return nvkm_outp_acquire_ior(outp, user, ior); } + /* First preference is to reuse the OR that is currently armed + * on HW, if any, in order to prevent unnecessary switching. + */ + list_for_each_entry(ior, &outp->disp->ior, head) { + if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) { + /*XXX: For various complicated reasons, we can't outright switch + * the boot-time OR on the first modeset without some fairly + * invasive changes. + * + * The systems that were fixed by modifying the OR selection + * code to account for HDA support shouldn't regress here as + * the HDA-enabled ORs match the relevant output's pad macro + * index, and the firmware seems to select an OR this way. + * + * This warning is to make it obvious if that proves wrong. + */ + WARN_ON(hda && !ior->func->hda.hpd); + return nvkm_outp_acquire_ior(outp, user, ior); + } + } + /* If we don't need HDA, first try to acquire an OR that doesn't * support it to leave free the ones that do. 
*/ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index c8ab1b5741a3e3c0e93a9943c0b3811b8c646ea6..db7769cb33ebadfa10078c05f019c35cd4680338 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c @@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00e4e4 + base); udelay(1); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index 7ef60895f43a7808229a8d72e27b2b54d67f3d48..edb6148cbca042c544939adb2cae2588e90a3e0e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c @@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, if (retries) udelay(400); - /* transaction request, wait up to 1ms for it to complete */ + /* transaction request, wait up to 2ms for it to complete */ nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl); - timeout = 1000; + timeout = 2000; do { ctrl = nvkm_rd32(device, 0x00d954 + base); udelay(1); diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 46fe1805c588080130baf4ac187342ce0e7351fc..2649469070aa59fcb2a8f9be598dcbcff0dd40f9 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = { static const struct drm_display_mode auo_kd101n80_45na_default_mode = { .clock = 157000, .hdisplay = 1200, - .hsync_start = 1200 + 80, - .hsync_end = 1200 + 80 + 24, - .htotal = 1200 + 80 + 24 + 36, + .hsync_start = 1200 + 60, + .hsync_end = 1200 + 60 + 24, + .htotal = 1200 + 60 + 24 + 56, .vdisplay = 1920, .vsync_start = 1920 + 16, .vsync_end = 1920 + 16 + 4, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 5178f87d6574e9502ebfb923769b452e5790cdce..4aeb960ccf1511c70094a110227de547c2437ec3 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = { .height = 165, }, .delay = { - .hpd_absent_delay = 200, + /* + * When power is first given to the panel there's a short + * spike on the HPD line. It was explained that this spike + * was until the TCON data download was complete. On + * one system this was measured at 8 ms. We'll put 15 ms + * in the prepare delay just to be safe and take it away + * from the hpd_absent_delay (which would otherwise be 200 ms) + * to handle this. That means: + * - If HPD isn't hooked up you still have 200 ms delay. + * - If HPD is hooked up we won't try to look at it for the + * first 15 ms. 
+ */ + .prepare = 15, + .hpd_absent_delay = 185, + .unprepare = 500, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 134aa2b01f9071d14f3658019d74aebaed0285e9..f434efdeca44dd03187cd76dfe5e8104304a313f 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -5563,6 +5563,7 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.dpm.ps) return -ENOMEM; power_state_offset = (u8 *)state_array->states; + rdev->pm.dpm.num_ps = 0; for (i = 0; i < state_array->ucNumEntries; i++) { u8 *idx; power_state = (union pplib_power_state *)power_state_offset; @@ -5572,10 +5573,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) if (!rdev->pm.power_state[i].clock_info) return -EINVAL; ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(rdev->pm.dpm.ps); + if (ps == NULL) return -ENOMEM; - } rdev->pm.dpm.ps[i].ps_priv = ps; ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i], non_clock_info, @@ -5597,8 +5596,8 @@ static int ci_parse_power_table(struct radeon_device *rdev) k++; } power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + rdev->pm.dpm.num_ps = i + 1; } - rdev->pm.dpm.num_ps = state_array->ucNumEntries; /* fill in the vce power states */ for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 557cbe5ab35f023a295aab16e817e174c61f6d74..2f2c9f0a10716a9767bf2f1fb6325b001fa05566 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -260,7 +260,7 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force) unsigned long reg; reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG); - if (reg & SUN4I_HDMI_HPD_HIGH) { + if (!(reg & SUN4I_HDMI_HPD_HIGH)) { cec_phys_addr_invalidate(hdmi->cec_adap); return connector_status_disconnected; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 9ffa9c75a5da261510809a0fa54be4b18e878933..16b3856296889e3f24be5eae8b7ec4eaa94a0040 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1069,10 +1069,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, if (new_content_type != SAME_AS_DISPLAY) { struct vmw_surface_metadata metadata = {0}; - metadata.base_size.width = hdisplay; - metadata.base_size.height = vdisplay; - metadata.base_size.depth = 1; - /* * If content buffer is a buffer object, then we have to * construct surface info @@ -1104,6 +1100,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, metadata = new_vfbs->surface->metadata; } + metadata.base_size.width = hdisplay; + metadata.base_size.height = vdisplay; + metadata.base_size.depth = 1; + if (vps->surf) { struct drm_vmw_size cur_base_size = vps->surf->metadata.base_size; diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c index 6f1fe7248d81c7404e8ea864f09ca65967ebfc52..a9c2de95c5e279514388a8fecd9e48683c74d5e2 100644 --- a/drivers/hid/hid-alps.c +++ b/drivers/hid/hid-alps.c @@ -25,6 +25,7 @@ #define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */ #define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */ +#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */ #define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */ #define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */ @@ -368,6 +369,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int 
size) case U1_FEATURE_REPORT_ID: break; case U1_ABSOLUTE_REPORT_ID: + case U1_ABSOLUTE_REPORT_ID_SECD: for (i = 0; i < hdata->max_fingers; i++) { u8 *contact = &data[i * 5]; diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 359bdfbe37016117cb0e8131fa3ab9b3a2d19aa1..e82f604d33e95c32ea88bea2bb10abb73820d9c2 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -60,6 +60,7 @@ MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. " struct apple_sc { unsigned long quirks; unsigned int fn_on; + unsigned int fn_found; DECLARE_BITMAP(pressed_numlock, KEY_CNT); }; @@ -365,12 +366,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { + struct apple_sc *asc = hid_get_drvdata(hdev); + if (usage->hid == (HID_UP_CUSTOM | 0x0003) || usage->hid == (HID_UP_MSVENDOR | 0x0003) || usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) { /* The fn key on Apple USB keyboards */ set_bit(EV_REP, hi->input->evbit); hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); + asc->fn_found = true; apple_setup_input(hi->input); return 1; } @@ -397,6 +401,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi, return 0; } +static int apple_input_configured(struct hid_device *hdev, + struct hid_input *hidinput) +{ + struct apple_sc *asc = hid_get_drvdata(hdev); + + if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); + asc->quirks = 0; + } + + return 0; +} + static int apple_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -611,6 +628,7 @@ static struct hid_driver apple_driver = { .event = apple_event, .input_mapping = apple_input_mapping, .input_mapped = apple_input_mapped, + .input_configured = apple_input_configured, }; module_hid_driver(apple_driver); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 874fc3791f3bbd8cb4d16da9534127dad82d4895..6f370e020feb325cbce914d824d67641bae0d6e9 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -618,6 +618,7 @@ #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096 +#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293 #define USB_VENDOR_ID_IMATION 0x0718 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 @@ -998,6 +999,8 @@ #define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232 #define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a +#define USB_VENDOR_ID_SAI 0x17dd + #define USB_VENDOR_ID_SAITEK 0x06a3 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 48dff5d6b60563a92ac9f6300c771b631e4eb137..a78c13cc9f470f555a9a63553ef187cbc8e2d041 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1153,7 +1153,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; - dj_report->device_index = 0xFF; + dj_report->device_index = HIDPP_RECEIVER_INDEX; dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES; retval = logi_dj_recv_send_report(djrcv_dev, dj_report); kfree(dj_report); @@ -1175,7 +1175,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, if (djrcv_dev->type == recvr_type_dj) { dj_report->report_id = 
REPORT_ID_DJ_SHORT; - dj_report->device_index = 0xFF; + dj_report->device_index = HIDPP_RECEIVER_INDEX; dj_report->report_type = REPORT_TYPE_CMD_SWITCH; dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F; dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = @@ -1204,7 +1204,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH); buf[0] = REPORT_ID_HIDPP_SHORT; - buf[1] = 0xFF; + buf[1] = HIDPP_RECEIVER_INDEX; buf[2] = 0x80; buf[3] = 0x00; buf[4] = 0x00; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 1e1cf8eae649e0aeda50d6d641d8a6552d95c5e4..b8b53dc95e86b7b0f3ad96ae02b1cd5a159d8eff 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -3146,7 +3146,7 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp) multiplier = 1; hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; - hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier); + hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier); return 0; } diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 34138667f8af3fde62dd157ae31b6ae3eefcdbfe..abd86903875f0012956a02a63f6f47dae1fb19a5 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -535,6 +535,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(MSC_RAW, input->mscbit); } + /* + * hid-input may mark device as using autorepeat, but neither + * the trackpad, nor the mouse actually want it. + */ + __clear_bit(EV_REP, input->evbit); + return 0; } diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index ca8b5c261c7ce5bc545811f81a115ad3fb345559..934fc0a798d4d03b89898b144404ce0a175ff7a7 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -88,6 +88,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, @@ -832,6 +833,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) }, { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) }, #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) }, diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index 6286204d4c56029656f6cf68758e75781d9ff0ff..a3b151b29bd711c2f87a6d4b6de8ac43b9219429 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -526,7 +526,8 @@ static int 
steam_register(struct steam_device *steam) steam_battery_register(steam); mutex_lock(&steam_devices_lock); - list_add(&steam->list, &steam_devices); + if (list_empty(&steam->list)) + list_add(&steam->list, &steam_devices); mutex_unlock(&steam_devices_lock); } @@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam) hid_info(steam->hdev, "Steam Controller '%s' disconnected", steam->serial_no); mutex_lock(&steam_devices_lock); - list_del(&steam->list); + list_del_init(&steam->list); mutex_unlock(&steam_devices_lock); steam->serial_no[0] = 0; } @@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev, mutex_init(&steam->mutex); steam->quirks = id->driver_data; INIT_WORK(&steam->work_connect, steam_work_connect_cb); + INIT_LIST_HEAD(&steam->list); steam->client_hdev = steam_create_client_hid(hdev); if (IS_ERR(steam->client_hdev)) { diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c index ec142bc8c1dafaffde8478c4b2de7b8a8fee71ca..35f3bfc3e6f5913fe23b231304dab4717ad01611 100644 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c @@ -373,6 +373,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { }, .driver_data = (void *)&sipodev_desc }, + { + .ident = "Mediacom FlexBook edge 13", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"), + }, + .driver_data = (void *)&sipodev_desc + }, { .ident = "Odys Winbook 13", .matches = { diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c index e95b7426106e658b9ef91626e544beab7a48641d..29603742c858333c9995cef0a5789bd68ce42c8c 100644 --- a/drivers/hwmon/amd_energy.c +++ b/drivers/hwmon/amd_energy.c @@ -362,7 +362,7 @@ static struct platform_driver amd_energy_driver = { static struct platform_device *amd_energy_platdev; static const struct x86_cpu_id cpu_ids[] __initconst = { - X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL), + X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, 0x31, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cpu_ids); diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 33fb54845bf6ddf1c90c12de7e41bf309917cb73..3d8239fd66ed6107f1cc76eac8cd2f72dd25a9cd 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -851,6 +851,8 @@ static int aspeed_create_fan(struct device *dev, ret = of_property_read_u32(child, "reg", &pwm_port); if (ret) return ret; + if (pwm_port >= ARRAY_SIZE(pwm_port_params)) + return -EINVAL; aspeed_create_pwm_port(priv, (u8)pwm_port); ret = of_property_count_u8_elems(child, "cooling-levels"); diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c index 0d4f3d97ffc61f8af1d4d4dff6c269353fcd5c9a..72c760373957877d914f00a53e268a22a471330b 100644 --- a/drivers/hwmon/drivetemp.c +++ b/drivers/hwmon/drivetemp.c @@ -285,6 +285,42 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val) return err; } +static const char * const sct_avoid_models[] = { +/* + * These drives will have WRITE FPDMA QUEUED command timeouts and sometimes just + * freeze until power-cycled under heavy write loads when their temperature is + * getting polled in SCT mode. The SMART mode seems to be fine, though. + * + * While only the 3 TB model (DT01ACA3) was actually caught exhibiting the + * problem let's play safe here to avoid data corruption and ban the whole + * DT01ACAx family. + + * The models from this array are prefix-matched. 
+ */ + "TOSHIBA DT01ACA", +}; + +static bool drivetemp_sct_avoid(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + unsigned int ctr; + + if (!sdev->model) + return false; + + /* + * The "model" field contains just the raw SCSI INQUIRY response + * "product identification" field, which has a width of 16 bytes. + * This field is space-filled, but is NOT NULL-terminated. + */ + for (ctr = 0; ctr < ARRAY_SIZE(sct_avoid_models); ctr++) + if (!strncmp(sdev->model, sct_avoid_models[ctr], + strlen(sct_avoid_models[ctr]))) + return true; + + return false; +} + static int drivetemp_identify_sata(struct drivetemp_data *st) { struct scsi_device *sdev = st->sdev; @@ -326,6 +362,13 @@ static int drivetemp_identify_sata(struct drivetemp_data *st) /* bail out if this is not a SATA device */ if (!is_ata || !is_sata) return -ENODEV; + + if (have_sct && drivetemp_sct_avoid(st)) { + dev_notice(&sdev->sdev_gendev, + "will avoid using SCT for temperature monitoring\n"); + have_sct = false; + } + if (!have_sct) goto skip_sct; diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index 491a570e8e508a10227a8a1808ede6335ff2fa04..924c02c1631dd178a6b4b5e2e588bf4a85b768f4 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c @@ -443,7 +443,7 @@ static ssize_t pwm1_enable_store(struct device *dev, } result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); - if (result) { + if (result < 0) { count = result; goto err; } diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index e7e1ddc1d631b25bc7a20541458b41a291c88672..750b08713dee50434afc2ac0da80a1bd8ec9e81d 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -786,13 +786,13 @@ static const char *const nct6798_temp_label[] = { "Agent1 Dimm1", "BYTE_TEMP0", "BYTE_TEMP1", - "", - "", + "PECI Agent 0 Calibration", /* undocumented */ + "PECI Agent 1 Calibration", /* undocumented */ "", "Virtual_TEMP" }; -#define NCT6798_TEMP_MASK 0x8fff0ffe +#define NCT6798_TEMP_MASK 0xbfff0ffe #define NCT6798_VIRT_TEMP_MASK 0x80000c00 /* NCT6102D/NCT6106D specific data */ diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index e25f541227daef2df9fff7308fce5d02a4305d4b..19317575d1c6a781e994ce05cb19c4f8099b4119 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -465,6 +465,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id); static int adm1275_probe(struct i2c_client *client, const struct i2c_device_id *id) { + s32 (*config_read_fn)(const struct i2c_client *client, u8 reg); u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1]; int config, device_config; int ret; @@ -510,11 +511,16 @@ static int adm1275_probe(struct i2c_client *client, "Device mismatch: Configured %s, detected %s\n", id->name, mid->name); - config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG); + if (mid->driver_data == adm1272 || mid->driver_data == adm1278 || + mid->driver_data == adm1293 || mid->driver_data == adm1294) + config_read_fn = i2c_smbus_read_word_data; + else + config_read_fn = i2c_smbus_read_byte_data; + config = config_read_fn(client, ADM1275_PMON_CONFIG); if (config < 0) return config; - device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG); + device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG); if (device_config < 0) return device_config; diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index 286d3cfda7de868306099315ea91c975d885c718..d421e691318b32b7f89d77fc33f2c2a49bb0002f 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c 
@@ -147,7 +147,7 @@ static enum hwmon_sensor_types scmi_types[] = { [ENERGY] = hwmon_energy, }; -static u32 hwmon_attributes[] = { +static u32 hwmon_attributes[hwmon_max] = { [hwmon_chip] = HWMON_C_REGISTER_TZ, [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL, diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c index 40387d58c8e7e7106a33841a4052e804b4920792..3ccc703dc94096a366c93c5076cd1d321cdca84e 100644 --- a/drivers/hwtracing/coresight/coresight-cti.c +++ b/drivers/hwtracing/coresight/coresight-cti.c @@ -747,17 +747,50 @@ static int cti_dying_cpu(unsigned int cpu) return 0; } +static int cti_pm_setup(struct cti_drvdata *drvdata) +{ + int ret; + + if (drvdata->ctidev.cpu == -1) + return 0; + + if (nr_cti_cpu) + goto done; + + cpus_read_lock(); + ret = cpuhp_setup_state_nocalls_cpuslocked( + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, + "arm/coresight_cti:starting", + cti_starting_cpu, cti_dying_cpu); + if (ret) { + cpus_read_unlock(); + return ret; + } + + ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); + cpus_read_unlock(); + if (ret) { + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); + return ret; + } + +done: + nr_cti_cpu++; + cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; + + return 0; +} + /* release PM registrations */ static void cti_pm_release(struct cti_drvdata *drvdata) { - if (drvdata->ctidev.cpu >= 0) { - if (--nr_cti_cpu == 0) { - cpu_pm_unregister_notifier(&cti_cpu_pm_nb); + if (drvdata->ctidev.cpu == -1) + return; - cpuhp_remove_state_nocalls( - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); - } - cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; + cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL; + if (--nr_cti_cpu == 0) { + cpu_pm_unregister_notifier(&cti_cpu_pm_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING); } } @@ -823,19 +856,14 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) /* driver data*/ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) { - ret = -ENOMEM; - dev_info(dev, "%s, mem err\n", __func__); - goto err_out; - } + if (!drvdata) + return -ENOMEM; /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); - if (IS_ERR(base)) { - ret = PTR_ERR(base); - dev_err(dev, "%s, remap err\n", __func__); - goto err_out; - } + if (IS_ERR(base)) + return PTR_ERR(base); + drvdata->base = base; dev_set_drvdata(dev, drvdata); @@ -854,8 +882,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) pdata = coresight_cti_get_platform_data(dev); if (IS_ERR(pdata)) { dev_err(dev, "coresight_cti_get_platform_data err\n"); - ret = PTR_ERR(pdata); - goto err_out; + return PTR_ERR(pdata); } /* default to powered - could change on PM notifications */ @@ -867,35 +894,20 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) drvdata->ctidev.cpu); else cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev); - if (!cti_desc.name) { - ret = -ENOMEM; - goto err_out; - } + if (!cti_desc.name) + return -ENOMEM; /* setup CPU power management handling for CPU bound CTI devices. 
*/ - if (drvdata->ctidev.cpu >= 0) { - cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata; - if (!nr_cti_cpu++) { - cpus_read_lock(); - ret = cpuhp_setup_state_nocalls_cpuslocked( - CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, - "arm/coresight_cti:starting", - cti_starting_cpu, cti_dying_cpu); - - if (!ret) - ret = cpu_pm_register_notifier(&cti_cpu_pm_nb); - cpus_read_unlock(); - if (ret) - goto err_out; - } - } + ret = cti_pm_setup(drvdata); + if (ret) + return ret; /* create dynamic attributes for connections */ ret = cti_create_cons_sysfs(dev, drvdata); if (ret) { dev_err(dev, "%s: create dynamic sysfs entries failed\n", cti_desc.name); - goto err_out; + goto pm_release; } /* set up coresight component description */ @@ -908,7 +920,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) drvdata->csdev = coresight_register(&cti_desc); if (IS_ERR(drvdata->csdev)) { ret = PTR_ERR(drvdata->csdev); - goto err_out; + goto pm_release; } /* add to list of CTI devices */ @@ -927,7 +939,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) dev_info(&drvdata->csdev->dev, "CTI initialized\n"); return 0; -err_out: +pm_release: cti_pm_release(drvdata); return ret; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 747afc875f91ba42cade9c106fd03eb01afbfe19..0c35cd5e0d1d9e3c04b3b86c484979c298570c2c 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1388,18 +1388,57 @@ static struct notifier_block etm4_cpu_pm_nb = { .notifier_call = etm4_cpu_pm_notify, }; -static int etm4_cpu_pm_register(void) +/* Setup PM. Called with cpus locked. Deals with error conditions and counts */ +static int etm4_pm_setup_cpuslocked(void) { - if (IS_ENABLED(CONFIG_CPU_PM)) - return cpu_pm_register_notifier(&etm4_cpu_pm_nb); + int ret; - return 0; + if (etm4_count++) + return 0; + + ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb); + if (ret) + goto reduce_count; + + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, + "arm/coresight4:starting", + etm4_starting_cpu, etm4_dying_cpu); + + if (ret) + goto unregister_notifier; + + ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, + "arm/coresight4:online", + etm4_online_cpu, NULL); + + /* HP dyn state ID returned in ret on success */ + if (ret > 0) { + hp_online = ret; + return 0; + } + + /* failed dyn state - remove others */ + cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING); + +unregister_notifier: + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + +reduce_count: + --etm4_count; + return ret; } -static void etm4_cpu_pm_unregister(void) +static void etm4_pm_clear(void) { - if (IS_ENABLED(CONFIG_CPU_PM)) - cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + if (--etm4_count != 0) + return; + + cpu_pm_unregister_notifier(&etm4_cpu_pm_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); + if (hp_online) { + cpuhp_remove_state_nocalls(hp_online); + hp_online = 0; + } } static int etm4_probe(struct amba_device *adev, const struct amba_id *id) @@ -1453,24 +1492,15 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) etm4_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); - if (!etm4_count++) { - cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING, - "arm/coresight4:starting", - etm4_starting_cpu, etm4_dying_cpu); - ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, - 
"arm/coresight4:online", - etm4_online_cpu, NULL); - if (ret < 0) - goto err_arch_supported; - hp_online = ret; + ret = etm4_pm_setup_cpuslocked(); + cpus_read_unlock(); - ret = etm4_cpu_pm_register(); - if (ret) - goto err_arch_supported; + /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */ + if (ret) { + etmdrvdata[drvdata->cpu] = NULL; + return ret; } - cpus_read_unlock(); - if (etm4_arch_supported(drvdata->arch) == false) { ret = -EINVAL; goto err_arch_supported; @@ -1517,13 +1547,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) err_arch_supported: etmdrvdata[drvdata->cpu] = NULL; - if (--etm4_count == 0) { - etm4_cpu_pm_unregister(); - - cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); - if (hp_online) - cpuhp_remove_state_nocalls(hp_online); - } + etm4_pm_clear(); return ret; } diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index ca232ec565e83a6db9dd3408b312d20e6bd556f5..c9ac3dc651135d156915f425ce9c232c94e568ad 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -1021,15 +1021,30 @@ int intel_th_set_output(struct intel_th_device *thdev, { struct intel_th_device *hub = to_intel_th_hub(thdev); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + int ret; /* In host mode, this is up to the external debugger, do nothing. */ if (hub->host_mode) return 0; - if (!hubdrv->set_output) - return -ENOTSUPP; + /* + * hub is instantiated together with the source device that + * calls here, so guaranteed to be present. + */ + hubdrv = to_intel_th_driver(hub->dev.driver); + if (!hubdrv || !try_module_get(hubdrv->driver.owner)) + return -EINVAL; + + if (!hubdrv->set_output) { + ret = -ENOTSUPP; + goto out; + } + + ret = hubdrv->set_output(hub, master); - return hubdrv->set_output(hub, master); +out: + module_put(hubdrv->driver.owner); + return ret; } EXPORT_SYMBOL_GPL(intel_th_set_output); diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 7ccac74553a6df3872462359d7c97200591174f1..21fdf0b9351661f95b2e03d793a7f372e3c3f7e6 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -233,11 +233,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Tiger Lake PCH-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Jasper Lake PCH */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Jasper Lake CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Elkhart Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529), @@ -248,6 +258,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Emmitsburg PCH */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { 0 }, }; diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 3a1f4e65037841eb8a878c3f1fc4177483566c52..a1529f571491d991aec1513ce007758077ca96b0 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -161,9 +161,7 @@ static int sth_stm_link(struct stm_data *stm_data, unsigned int master, { struct sth_device *sth = container_of(stm_data, struct sth_device, stm); - 
intel_th_set_output(to_intel_th_device(sth->dev), master); - - return 0; + return intel_th_set_output(to_intel_th_device(sth->dev), master); } static int intel_th_sw_init(struct sth_device *sth) diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 4b72398af505df85d5d318b2aa82e69fe0dfb3e3..e4b7f2a951ad5dc716ddbb9c775fa8624847c944 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) /* Read data if receive data valid is set */ while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & CDNS_I2C_SR_RXDV) { - /* - * Clear hold bit that was set for FIFO control if - * RX data left is less than FIFO depth, unless - * repeated start is selected. - */ - if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) && - !id->bus_hold_flag) - cdns_i2c_clear_bus_hold(id); - if (id->recv_count > 0) { *(id->p_recv_buf)++ = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); id->recv_count--; id->curr_recv_count--; + + /* + * Clear hold bit that was set for FIFO control + * if RX data left is less than or equal to + * FIFO DEPTH unless repeated start is selected + */ + if (id->recv_count <= CDNS_I2C_FIFO_DEPTH && + !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); + } else { dev_err(id->adap.dev.parent, "xfer_size reg rollover. xfer aborted!\n"); @@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->recv_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->send_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; - cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. 
*/ diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 18d1e4fd4cf345db8bc7cb69770cb11a00433d37..7f130829bf01814948ab54baf2aa590491e86bf6 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN); - geni_se_setup_m_cmd(se, I2C_READ, m_param); if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); @@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, dma_buf = NULL; } + geni_se_setup_m_cmd(se, I2C_READ, m_param); + time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); if (!time_left) geni_i2c_abort_xfer(gi2c); @@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, geni_se_select_mode(se, GENI_SE_FIFO); writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN); - geni_se_setup_m_cmd(se, I2C_WRITE, m_param); if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) { geni_se_select_mode(se, GENI_SE_FIFO); @@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg, dma_buf = NULL; } + geni_se_setup_m_cmd(se, I2C_WRITE, m_param); + if (!dma_buf) /* Get FIFO IRQ */ writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index a45c4bf1ec014e84b314c042edc365294a6de027..2e3e1bb75013454867c1a1d1e32036c165db7652 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave) /* disable irqs and ensure none is running before clearing ptr */ rcar_i2c_write(priv, ICSIER, 0); rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ synchronize_irq(priv->irq); priv->slave = NULL; @@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev) if (ret < 0) goto out_pm_put; + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ + if (priv->devtype == I2C_RCAR_GEN3) { priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (!IS_ERR(priv->rstc)) { diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 5427f047faf068e392111721c7f365eb83f132d0..1589179d5eb92af2b6dc5181b9a04b30efc05dfa 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); + if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; - } if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. 
You might see address collisions\n", @@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client) { int ret; + if (IS_ERR_OR_NULL(client)) + return -EINVAL; + if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 00e100fc845af27f94b0370d30346e4199d2ffc7..813bca7cfc3ed95ff571350b7ff85e5f15d0d60b 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1685,10 +1685,13 @@ static int mma8452_probe(struct i2c_client *client, ret = mma8452_set_freefall_mode(data, false); if (ret < 0) - goto buffer_cleanup; + goto unregister_device; return 0; +unregister_device: + iio_device_unregister(indio_dev); + buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c index f47606ebbbbef489c42586e00e290705c838f57b..b33fe6c3907e560254eaf8271c85bc56217f3984 100644 --- a/drivers/iio/adc/ad7780.c +++ b/drivers/iio/adc/ad7780.c @@ -329,7 +329,7 @@ static int ad7780_probe(struct spi_device *spi) ret = ad7780_init_gpios(&spi->dev, st); if (ret) - goto error_cleanup_buffer_and_trigger; + return ret; st->reg = devm_regulator_get(&spi->dev, "avdd"); if (IS_ERR(st->reg)) diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index c24c8da99eb47531ad6b7b13170b969c149932a5..7af8f0510535a25d2560b942127c0b1e9885fcc8 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -332,12 +332,12 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev) if (cl->dev->of_node != cln) continue; - if (!try_module_get(dev->driver->owner)) { + if (!try_module_get(cl->dev->driver->owner)) { mutex_unlock(®istered_clients_lock); return ERR_PTR(-ENODEV); } - get_device(dev); + get_device(cl->dev); cl->info = info; mutex_unlock(®istered_clients_lock); return cl; diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c index e9f87e42ff4f4614e746d5f044d5d76016289677..a3507624b30f8eb82fb6920ad31399287d5739ed 100644 --- a/drivers/iio/health/afe4403.c +++ b/drivers/iio/health/afe4403.c @@ -65,6 +65,7 @@ static const struct reg_field afe4403_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct data layout to push into IIO buffer. 
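/*
 * Illustrative sketch, not from the patch: the i2c slave register and
 * unregister hunks above lean on two facts - IS_ERR_OR_NULL() catches
 * both NULL and ERR_PTR() clients, and WARN() returns the truth value of
 * its condition, so the check and the warning share one statement.  The
 * callback type below is invented for the example.
 */
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>

typedef int (*example_slave_cb_t)(struct i2c_client *client);

static int example_slave_register(struct i2c_client *client,
                                  example_slave_cb_t cb)
{
        if (WARN(IS_ERR_OR_NULL(client) || !cb, "insufficient data\n"))
                return -EINVAL;

        /* ... hand the callback to the adapter ... */
        return 0;
}

static int example_slave_unregister(struct i2c_client *client)
{
        if (IS_ERR_OR_NULL(client))
                return -EINVAL;

        /* ... detach from the adapter ... */
        return 0;
}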
*/ struct afe4403_data { struct device *dev; @@ -74,6 +75,8 @@ struct afe4403_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + /* Ensure suitable alignment for timestamp */ + s32 buffer[8] __aligned(8); }; enum afe4403_chan_id { @@ -309,7 +312,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4403_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[8]; u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ}; u8 rx[3]; @@ -326,7 +328,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - buffer[i++] = get_unaligned_be24(&rx[0]); + afe->buffer[i++] = get_unaligned_be24(&rx[0]); } /* Disable reading from the device */ @@ -335,7 +337,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private) if (ret) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c index e728bbb21ca8800b50630cd8be440d9d70f2462b..cebb1fd4d0b15b0138cdcf2127d7843d1f97cdb5 100644 --- a/drivers/iio/health/afe4404.c +++ b/drivers/iio/health/afe4404.c @@ -83,6 +83,7 @@ static const struct reg_field afe4404_reg_fields[] = { * @regulator: Pointer to the regulator for the IC * @trig: IIO trigger for this device * @irq: ADC_RDY line interrupt number + * @buffer: Used to construct a scan to push to the iio buffer. */ struct afe4404_data { struct device *dev; @@ -91,6 +92,7 @@ struct afe4404_data { struct regulator *regulator; struct iio_trigger *trig; int irq; + s32 buffer[10] __aligned(8); }; enum afe4404_chan_id { @@ -328,17 +330,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private) struct iio_dev *indio_dev = pf->indio_dev; struct afe4404_data *afe = iio_priv(indio_dev); int ret, bit, i = 0; - s32 buffer[10]; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) { ret = regmap_read(afe->regmap, afe4404_channel_values[bit], - &buffer[i++]); + &afe->buffer[i++]); if (ret) goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); + iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer, + pf->timestamp); err: iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index 7ecd2ffa3132590551591826a70c07018efc6bb9..665eb7e38293bb501e63a5c3083fa18c759d50b4 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c @@ -38,6 +38,11 @@ struct hdc100x_data { /* integration time of the sensor */ int adc_int_us[2]; + /* Ensure natural alignment of timestamp */ + struct { + __be16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* integration time in us */ @@ -322,7 +327,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) struct i2c_client *client = data->client; int delay = data->adc_int_us[0] + data->adc_int_us[1]; int ret; - s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */ /* dual read starts at temp register */ mutex_lock(&data->lock); @@ -333,13 +337,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) } usleep_range(delay, delay + 1000); - ret = i2c_master_recv(client, (u8 *)buf, 4); + ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4); if (ret < 0) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - 
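/*
 * Stand-alone illustration (field sizes invented for the example): the
 * afe4403/afe4404/hdc100x changes above move the scan data into the
 * driver state with an s64 timestamp slot aligned to 8 bytes, which is
 * what iio_push_to_buffers_with_timestamp() relies on when it stores the
 * timestamp after the channel data.
 */
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct scan {
        uint16_t channels[2];                   /* sensor samples */
        int64_t ts __attribute__((aligned(8))); /* timestamp slot */
};

int main(void)
{
        printf("sizeof(struct scan)  = %zu\n", sizeof(struct scan));
        printf("offsetof(scan, ts)   = %zu\n", offsetof(struct scan, ts));
        printf("alignof(struct scan) = %zu\n", alignof(struct scan));
        return 0;
}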
iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: mutex_unlock(&data->lock); diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h index 7d6771f7cf473663b7607a0fc886fa2b4822d802..b2eb5abeaccd3f466848e1bbd318563761089a3f 100644 --- a/drivers/iio/humidity/hts221.h +++ b/drivers/iio/humidity/hts221.h @@ -14,8 +14,6 @@ #include -#define HTS221_DATA_SIZE 2 - enum hts221_sensor_type { HTS221_SENSOR_H, HTS221_SENSOR_T, @@ -39,6 +37,11 @@ struct hts221_hw { bool enabled; u8 odr; + /* Ensure natural alignment of timestamp */ + struct { + __le16 channels[2]; + s64 ts __aligned(8); + } scan; }; extern const struct dev_pm_ops hts221_pm_ops; diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index 9fb3f33614d401b370f30c0a6ab835074dc56ae6..ba7d413d75ba2bd7173a5e43aaceb12fc4404414 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -160,7 +160,6 @@ static const struct iio_buffer_setup_ops hts221_buffer_ops = { static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) { - u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)]; struct iio_poll_func *pf = p; struct iio_dev *iio_dev = pf->indio_dev; struct hts221_hw *hw = iio_priv(iio_dev); @@ -170,18 +169,20 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p) /* humidity data */ ch = &iio_dev->channels[HTS221_SENSOR_H]; err = regmap_bulk_read(hw->regmap, ch->address, - buffer, HTS221_DATA_SIZE); + &hw->scan.channels[0], + sizeof(hw->scan.channels[0])); if (err < 0) goto out; /* temperature data */ ch = &iio_dev->channels[HTS221_SENSOR_T]; err = regmap_bulk_read(hw->regmap, ch->address, - buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE); + &hw->scan.channels[1], + sizeof(hw->scan.channels[1])); if (err < 0) goto out; - iio_push_to_buffers_with_timestamp(iio_dev, buffer, + iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan, iio_get_time_ns(iio_dev)); out: diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index 1527f01a44f12f1852681c1e4212f78e83fbdaec..35253334270202b4f93eb283f70e37f02c1b6e0b 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -130,6 +130,8 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_PM2P5] = "pm2p5", [IIO_MOD_PM4] = "pm4", [IIO_MOD_PM10] = "pm10", + [IIO_MOD_ETHANOL] = "ethanol", + [IIO_MOD_H2] = "h2", }; /* relies on pairs of these shared then separate */ diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 810fdfd37c88b0275129d24934e01c95ecddc821..91c39352fba26212b3b4b48fd33f94f2cf8ae974 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c @@ -192,6 +192,11 @@ struct ak8974 { bool drdy_irq; struct completion drdy_complete; bool drdy_active_low; + /* Ensure timestamp is naturally aligned */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static const char ak8974_reg_avdd[] = "avdd"; @@ -657,7 +662,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) { struct ak8974 *ak8974 = iio_priv(indio_dev); int ret; - __le16 hw_values[8]; /* Three axes + 64bit padding */ pm_runtime_get_sync(&ak8974->i2c->dev); mutex_lock(&ak8974->lock); @@ -667,13 +671,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev) dev_err(&ak8974->i2c->dev, "error triggering measure\n"); goto out_unlock; } - ret = ak8974_getresult(ak8974, hw_values); + ret = 
ak8974_getresult(ak8974, ak8974->scan.channels); if (ret) { dev_err(&ak8974->i2c->dev, "error getting measures\n"); goto out_unlock; } - iio_push_to_buffers_with_timestamp(indio_dev, hw_values, + iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan, iio_get_time_ns(indio_dev)); out_unlock: @@ -862,19 +866,21 @@ static int ak8974_probe(struct i2c_client *i2c, ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); if (IS_ERR(ak8974->map)) { dev_err(&i2c->dev, "failed to allocate register map\n"); + pm_runtime_put_noidle(&i2c->dev); + pm_runtime_disable(&i2c->dev); return PTR_ERR(ak8974->map); } ret = ak8974_set_power(ak8974, AK8974_PWR_ON); if (ret) { dev_err(&i2c->dev, "could not power on\n"); - goto power_off; + goto disable_pm; } ret = ak8974_detect(ak8974); if (ret) { dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n"); - goto power_off; + goto disable_pm; } ret = ak8974_selftest(ak8974); @@ -884,14 +890,9 @@ static int ak8974_probe(struct i2c_client *i2c, ret = ak8974_reset(ak8974); if (ret) { dev_err(&i2c->dev, "AK8974 reset failed\n"); - goto power_off; + goto disable_pm; } - pm_runtime_set_autosuspend_delay(&i2c->dev, - AK8974_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&i2c->dev); - pm_runtime_put(&i2c->dev); - indio_dev->dev.parent = &i2c->dev; switch (ak8974->variant) { case AK8974_WHOAMI_VALUE_AMI306: @@ -957,6 +958,11 @@ static int ak8974_probe(struct i2c_client *i2c, goto cleanup_buffer; } + pm_runtime_set_autosuspend_delay(&i2c->dev, + AK8974_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&i2c->dev); + pm_runtime_put(&i2c->dev); + return 0; cleanup_buffer: @@ -965,7 +971,6 @@ static int ak8974_probe(struct i2c_client *i2c, pm_runtime_put_noidle(&i2c->dev); pm_runtime_disable(&i2c->dev); ak8974_set_power(ak8974, AK8974_PWR_OFF); -power_off: regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs); return ret; diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 2f598ad91621fd70c715d140da665dad977365d9..f5db9fa086f3a192d5a46520d1db32e5a48fa9ba 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -212,16 +212,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ms5611_state *st = iio_priv(indio_dev); - s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */ + /* Ensure buffer elements are naturally aligned */ + struct { + s32 channels[2]; + s64 ts __aligned(8); + } scan; int ret; mutex_lock(&st->lock); - ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]); + ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1], + &scan.channels[0]); mutex_unlock(&st->lock); if (ret < 0) goto err; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); err: diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index 37fe851f89af6543747a1e4b14c980caeb912658..799a8dc3e248d2f2cb96957ce3c0152992d8ee04 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -665,8 +665,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev) int err; err = pm_runtime_get_sync(indio_dev->dev.parent); - if (err < 0) + if (err < 0) { + pm_runtime_put(indio_dev->dev.parent); return err; + } if (err > 0) { /* diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 
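/*
 * Illustrative sketch, not from the drivers: the zpa2326 hunk above
 * follows the usual pm_runtime_get_sync() contract - the usage count is
 * raised even when the call fails, so the error path still has to drop
 * it.  The ak8974 rework likewise defers pm_runtime_put()/autosuspend
 * until probe has fully succeeded.  A minimal sketch of the first rule:
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_use_hw(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                /* get_sync() bumped the usage count even on failure. */
                pm_runtime_put(dev);
                return ret;
        }

        /* ... access the hardware ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}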
0d1377232933225d019876ec69c9d08c6d2f2c34..dc0558b23158a015f2420308c0b0bc32c45b8ff4 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3676,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, return ret; } cm_id_priv->id.state = IB_CM_IDLE; + spin_lock_irq(&cm.lock); if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); } + spin_unlock_irq(&cm.lock); return 0; } diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index 655795bfa0ee504d1282464141ea08de118a4ebe..513825e424bffa57efe7a4f47f327961b5e7ba1f 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq) INIT_WORK(&dim->work, ib_cq_rdma_dim_work); } +static void rdma_dim_destroy(struct ib_cq *cq) +{ + if (!cq->dim) + return; + + cancel_work_sync(&cq->dim->work); + kfree(cq->dim); +} + static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) { int rc; @@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, return cq; out_destroy_cq: + rdma_dim_destroy(cq); rdma_restrack_del(&cq->res); cq->device->ops.destroy_cq(cq, udata); out_free_wc: @@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata) WARN_ON_ONCE(1); } + rdma_dim_destroy(cq); trace_cq_free(cq); rdma_restrack_del(&cq->res); cq->device->ops.destroy_cq(cq, udata); - if (cq->dim) - cancel_work_sync(&cq->dim->work); - kfree(cq->dim); kfree(cq->wc); kfree(cq); } diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 3027cd2fb247937b5f11cd31c6f3907a3e718b43..6d3ed7c6e19ebb90216e757ec5f699a49b7cf1e5 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -649,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj, { struct ib_uverbs_file *ufile = attrs->ufile; - /* alloc_commit consumes the uobj kref */ - uobj->uapi_object->type_class->alloc_commit(uobj); - /* kref is held so long as the uobj is on the uobj list. 
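/*
 * Illustrative sketch, not from the patch: rdma_dim_destroy() above
 * applies the standard teardown rule for objects that embed deferred
 * work - cancel and wait for the work before freeing the memory that
 * contains it.  The context structure here is hypothetical.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_ctx {
        struct work_struct work;
        /* ... state used by the work function ... */
};

static void example_ctx_destroy(struct example_ctx *ctx)
{
        if (!ctx)
                return;

        /* The handler must not be running once the memory is gone. */
        cancel_work_sync(&ctx->work);
        kfree(ctx);
}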
*/ uverbs_uobject_get(uobj); spin_lock_irq(&ufile->uobjects_lock); @@ -661,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj, /* matches atomic_set(-1) in alloc_uobj */ atomic_set(&uobj->usecnt, 0); + /* alloc_commit consumes the uobj kref */ + uobj->uapi_object->type_class->alloc_commit(uobj); + /* Matches the down_read in rdma_alloc_begin_uobject */ up_read(&ufile->hw_destroy_rwsem); } diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index a2ed09a3c714a9905d346e80f9d91f69248af9e7..8c930bf1df894b711ea0d43739642f6d5d6f5278 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask) return len; } -static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) +static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; void *data; struct ib_sa_mad *mad; int len; + unsigned long flags; + unsigned long delay; + gfp_t gfp_flag; + int ret; + + INIT_LIST_HEAD(&query->list); + query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); mad = query->mad_buf->mad; len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask); @@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) /* Repair the nlmsg header length */ nlmsg_end(skb, nlh); - return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask); -} + gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC : + GFP_NOWAIT; -static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) -{ - unsigned long flags; - unsigned long delay; - int ret; + spin_lock_irqsave(&ib_nl_request_lock, flags); + ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag); - INIT_LIST_HEAD(&query->list); - query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); + if (ret) + goto out; - /* Put the request on the list first.*/ - spin_lock_irqsave(&ib_nl_request_lock, flags); + /* Put the request on the list.*/ delay = msecs_to_jiffies(sa_local_svc_timeout_ms); query->timeout = delay + jiffies; list_add_tail(&query->list, &ib_nl_request_list); /* Start the timeout if this is the only request */ if (ib_nl_request_list.next == &query->list) queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - ret = ib_nl_send_msg(query, gfp_mask); - if (ret) { - ret = -EIO; - /* Remove the request */ - spin_lock_irqsave(&ib_nl_request_lock, flags); - list_del(&query->list); - spin_unlock_irqrestore(&ib_nl_request_lock, flags); - } +out: + spin_unlock_irqrestore(&ib_nl_request_lock, flags); return ret; } diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5b87eee8ccc8b6b3e6c015ed7c5a06523d1df735..d03dacaef78805ddcb35d8f97e8417528fe9cc58 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, size_t in_size; int ret; + if (in_len < offsetofend(typeof(cmd), reserved)) + return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; @@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, size_t in_size; int ret; + if (in_len < offsetofend(typeof(cmd), reserved)) + return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return 
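/*
 * Illustrative sketch, not from ucma: the ucma hunks above reject ioctls
 * whose declared length cannot cover the fields the handler relies on,
 * before any data is copied from user space.  The command layout below
 * is invented for the example; offsetofend() and min_t() are the real
 * helpers used.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_cmd {
        __u32 id;
        __u32 reserved;
        __u64 newer_field;      /* may be absent in older callers */
};

static int example_parse(const char __user *inbuf, size_t in_len,
                         struct example_cmd *cmd)
{
        size_t in_size;

        /* Everything up to and including 'reserved' must be supplied. */
        if (in_len < offsetofend(struct example_cmd, reserved))
                return -EINVAL;

        memset(cmd, 0, sizeof(*cmd));
        in_size = min_t(size_t, in_len, sizeof(*cmd));
        if (copy_from_user(cmd, inbuf, in_size))
                return -EFAULT;

        return 0;
}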
-EFAULT; diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 5eed4360695f53fcde3da7f557187b47d67950ae..cb7ad12888219774925200f22723ff381332aa81 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -830,6 +830,29 @@ static int create_workqueues(struct hfi1_devdata *dd) return -ENOMEM; } +/** + * destroy_workqueues - destroy per port workqueues + * @dd: the hfi1_ib device + */ +static void destroy_workqueues(struct hfi1_devdata *dd) +{ + int pidx; + struct hfi1_pportdata *ppd; + + for (pidx = 0; pidx < dd->num_pports; ++pidx) { + ppd = dd->pport + pidx; + + if (ppd->hfi1_wq) { + destroy_workqueue(ppd->hfi1_wq); + ppd->hfi1_wq = NULL; + } + if (ppd->link_wq) { + destroy_workqueue(ppd->link_wq); + ppd->link_wq = NULL; + } + } +} + /** * enable_general_intr() - Enable the IRQs that will be handled by the * general interrupt handler. @@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd) * We can't count on interrupts since we are stopping. */ hfi1_quiet_serdes(ppd); - - if (ppd->hfi1_wq) { - destroy_workqueue(ppd->hfi1_wq); - ppd->hfi1_wq = NULL; - } - if (ppd->link_wq) { - destroy_workqueue(ppd->link_wq); - ppd->link_wq = NULL; - } + if (ppd->hfi1_wq) + flush_workqueue(ppd->hfi1_wq); + if (ppd->link_wq) + flush_workqueue(ppd->link_wq); } sdma_exit(dd); } @@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev) * clear dma engines, etc. */ shutdown_device(dd); + destroy_workqueues(dd); stop_timers(dd); diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 0c2ae9f7b3e8a91b2030168feb05e81ea8c7b077..be62284e42d9aff47007b68f9b3aacaafa0aed96 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) { /* Constraining 10KB packets to 8KB packets */ if (mtu == (enum ib_mtu)OPA_MTU_10240) - mtu = OPA_MTU_8192; + mtu = (enum ib_mtu)OPA_MTU_8192; return opa_mtu_enum_to_int((enum opa_mtu)mtu); } @@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if (dd->flags & HFI1_SHUTDOWN) + return true; return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 243b4ba0b6f6b625d7f9bd44354f501d663d0faa..facff133139a9597299c0b151001ab4de0cb718f 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp) struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct hfi1_pportdata *ppd = ppd_from_ibp(ibp); - struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); + struct hfi1_devdata *dd = ppd->dd; + + if ((dd->flags & HFI1_SHUTDOWN)) + return true; return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq, priv->s_sde ? 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index dd01a51816ccaae7dee2a787c6f3fa613c87ec8d..0618ced45bf80a5c36d3b8e30e33e0bbd6c371ea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -3954,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, return 0; } +static inline enum ib_mtu get_mtu(struct ib_qp *ibqp, + const struct ib_qp_attr *attr) +{ + if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) + return IB_MTU_4096; + + return attr->path_mtu; +} + static int modify_qp_init_to_rtr(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, struct hns_roce_v2_qp_context *context, @@ -3965,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, struct ib_device *ibdev = &hr_dev->ib_dev; dma_addr_t trrl_ba; dma_addr_t irrl_ba; + enum ib_mtu mtu; u8 port_num; u64 *mtts; u8 *dmac; @@ -4062,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M, V2_QPC_BYTE_52_DMAC_S, 0); - /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */ + mtu = get_mtu(ibqp, attr); + + if (attr_mask & IB_QP_PATH_MTU) { + roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, + V2_QPC_BYTE_24_MTU_S, mtu); + roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, + V2_QPC_BYTE_24_MTU_S, 0); + } + +#define MAX_LP_MSG_LEN 65536 + /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, V2_QPC_BYTE_56_LP_PKTN_INI_S, - ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096)); + ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu))); roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M, V2_QPC_BYTE_56_LP_PKTN_INI_S, 0); - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, IB_MTU_4096); - else if (attr_mask & IB_QP_PATH_MTU) - roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, attr->path_mtu); - - roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M, - V2_QPC_BYTE_24_MTU_S, 0); - roce_set_bit(qpc_mask->byte_108_rx_reqepsn, V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0); roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M, diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 0e71ebee9e5202d7d2e09fa5b2682b1e2185e547..6b226a5eb7db6fbc808c6420d81675932f437107 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num; buf_attr.page_shift = is_fast ? 
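/*
 * Stand-alone illustration of the arithmetic above: the hns_roce change
 * programs LP_PKTN_INI so that mtu * 2^LP_PKTN_INI stays within a 64 KB
 * message, i.e. LP_PKTN_INI = ilog2(65536 / mtu_in_bytes).
 */
#include <stdio.h>

#define MAX_LP_MSG_LEN 65536

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        static const unsigned int mtu_bytes[] = { 256, 512, 1024, 2048, 4096 };
        unsigned int i;

        for (i = 0; i < sizeof(mtu_bytes) / sizeof(mtu_bytes[0]); i++)
                printf("mtu %4u -> LP_PKTN_INI %u\n", mtu_bytes[i],
                       ilog2_u32(MAX_LP_MSG_LEN / mtu_bytes[i]));
        return 0;
}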
PAGE_SHIFT : - hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT; + hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT; buf_attr.region[0].size = length; buf_attr.region[0].hopnum = mr->pbl_hop_num; buf_attr.region_count = 1; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 343a8b8361e78e478ac7eef4ae8ccffc0d815898..6f99ed03d88e74c25c62f50ee54325b943f0789c 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, mdev_port_num); if (err) goto out; - ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); props->active_width = IB_WIDTH_4X; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 7d2ec9ee5097b0903da57d4209b76cfd6e9e354b..77dca1e05bba8db7dc40c6346855c84386302f4b 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) */ synchronize_srcu(&dev->odp_srcu); + /* + * All work on the prefetch list must be completed, xa_erase() prevented + * new work from being created. + */ + wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work)); + + /* + * At this point it is forbidden for any other thread to enter + * pagefault_mr() on this imr. It is already forbidden to call + * pagefault_mr() on an implicit child. Due to this additions to + * implicit_children are prevented. + */ + + /* + * Block destroy_unused_implicit_child_mr() from incrementing + * num_deferred_work. + */ xa_lock(&imr->implicit_children); xa_for_each (&imr->implicit_children, idx, mtt) { __xa_erase(&imr->implicit_children, idx); @@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr) xa_unlock(&imr->implicit_children); /* - * num_deferred_work can only be incremented inside the odp_srcu, or - * under xa_lock while the child is in the xarray. Thus at this point - * it is only decreasing, and all work holding it is now on the wq. + * Wait for any concurrent destroy_unused_implicit_child_mr() to + * complete. 
*/ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work)); @@ -1781,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd, work->frags[i].mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); if (!work->frags[i].mr) { - work->num_sge = i - 1; - if (i) - destroy_prefetch_work(work); + work->num_sge = i; return false; } @@ -1849,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, srcu_key = srcu_read_lock(&dev->odp_srcu); if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) { srcu_read_unlock(&dev->odp_srcu, srcu_key); + destroy_prefetch_work(work); return -EINVAL; } queue_work(system_unbound_wq, &work->work); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index f939c9b769f044e058486bfe38268915e67e73cd..1225b8d77510f70d8dd503abeb8312e6ee0148c7 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1766,15 +1766,14 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd, } static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev, + struct mlx5_ib_qp *qp, struct ib_qp_init_attr *init_attr, - struct mlx5_ib_create_qp *ucmd, void *qpc) { int scqe_sz; bool allow_scat_cqe = false; - if (ucmd) - allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; + allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE; if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) return; @@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, u32 *in; int err; - mutex_init(&qp->mutex); - if (attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; @@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 *in; int err; - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, } if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) && (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC)) - configure_requester_scat_cqe(dev, init_attr, ucmd, qpc); + configure_requester_scat_cqe(dev, qp, init_attr, qpc); if (qp->rq.wqe_cnt) { MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); @@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, u32 *in; int err; - mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); @@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag, return; } - if (flag == MLX5_QP_FLAG_SCATTER_CQE) { + switch (flag) { + case MLX5_QP_FLAG_SCATTER_CQE: + case MLX5_QP_FLAG_ALLOW_SCATTER_CQE: /* - * We don't return error if this flag was provided, - * and mlx5 doesn't have right capability. - */ - *flags &= ~MLX5_QP_FLAG_SCATTER_CQE; + * We don't return error if these flags were provided, + * and mlx5 doesn't have right capability. 
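/*
 * Stand-alone illustration (flag values invented for the example): the
 * mlx5 process_vendor_flag() rework in the surrounding hunks silently
 * clears optional scatter-CQE flags when the device lacks the capability
 * instead of failing the verb.  The policy is a plain mask operation:
 */
#include <stdio.h>

#define FLAG_SCATTER_CQE        (1u << 0)
#define FLAG_ALLOW_SCATTER_CQE  (1u << 1)
#define FLAG_SIGNATURE          (1u << 2)

static unsigned int filter_flags(unsigned int requested, int has_scatter_cqe)
{
        if (!has_scatter_cqe)
                requested &= ~(FLAG_SCATTER_CQE | FLAG_ALLOW_SCATTER_CQE);
        return requested;
}

int main(void)
{
        unsigned int req = FLAG_SCATTER_CQE | FLAG_ALLOW_SCATTER_CQE |
                           FLAG_SIGNATURE;

        printf("requested 0x%x -> accepted 0x%x\n", req, filter_flags(req, 0));
        return 0;
}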
+ */ + *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE | + MLX5_QP_FLAG_ALLOW_SCATTER_CQE); return; + default: + break; } mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag); } @@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp); process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE, MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); + process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE, + MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); if (qp->type == IB_QPT_RAW_PACKET) { cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || @@ -2668,6 +2670,10 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) return (create_flags) ? -EINVAL : 0; + process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP, + mlx5_get_flow_namespace(dev->mdev, + MLX5_FLOW_NAMESPACE_BYPASS), + qp); process_create_flag(dev, &create_flags, IB_QP_CREATE_INTEGRITY_EN, MLX5_CAP_GEN(mdev, sho), qp); @@ -2959,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, goto free_ucmd; } + mutex_init(&qp->mutex); qp->type = type; if (udata) { err = process_vendor_flags(dev, qp, params.ucmd, attr); @@ -3001,11 +3008,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr, mlx5_ib_destroy_dct(qp); } else { /* - * The two lines below are temp solution till QP allocation + * These lines below are temp solution till QP allocation * will be moved to be under IB/core responsiblity. */ qp->ibqp.send_cq = attr->send_cq; qp->ibqp.recv_cq = attr->recv_cq; + qp->ibqp.pd = pd; destroy_qp_common(dev, qp, udata); } diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c index 6f5eadc4d1832e72c8bbc0b33a55b8f42ef3a9ca..37aaacebd3f26ea652f1f2c5cd3a3e464fe2daf6 100644 --- a/drivers/infiniband/hw/mlx5/srq_cmd.c +++ b/drivers/infiniband/hw/mlx5/srq_cmd.c @@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn) struct mlx5_srq_table *table = &dev->srq_table; struct mlx5_core_srq *srq; - xa_lock(&table->array); + xa_lock_irq(&table->array); srq = xa_load(&table->array, srqn); if (srq) refcount_inc(&srq->common.refcount); - xa_unlock(&table->array); + xa_unlock_irq(&table->array); return srq; } diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 7db35dd6ad74c302bcb5057bc542faf371e7f16b..332a8ba94b81b0c5e00b8f6d569181c0b249a274 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, qp->s_tail_ack_queue = 0; qp->s_acked_ack_queue = 0; qp->s_num_rd_atomic = 0; - if (qp->r_rq.kwq) - qp->r_rq.kwq->count = qp->r_rq.size; qp->r_sge.num_sge = 0; atomic_set(&qp->s_reserved_used, 0); } @@ -2366,31 +2364,6 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) return 0; } -/** - * get_count - count numbers of request work queue entries - * in circular buffer - * @rq: data structure for request queue entry - * @tail: tail indices of the circular buffer - * @head: head indices of the circular buffer - * - * Return - total number of entries in the circular buffer - */ -static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head) -{ - u32 count; - - count = head; - - if (count >= rq->size) - count = 0; - if (count < tail) - count += rq->size - tail; - else - count -= 
tail; - - return count; -} - /** * get_rvt_head - get head indices of the circular buffer * @rq: data structure for request queue entry @@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) { head = get_rvt_head(rq, ip); - kwq->count = get_count(rq, tail, head); + kwq->count = rvt_get_rq_count(rq, head, tail); } if (unlikely(kwq->count == 0)) { ret = 0; @@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only) * the number of remaining WQEs. */ if (kwq->count < srq->limit) { - kwq->count = get_count(rq, tail, get_rvt_head(rq, ip)); + kwq->count = + rvt_get_rq_count(rq, + get_rvt_head(rq, ip), tail); if (kwq->count < srq->limit) { struct ib_event ev; diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c index 977906cc0d11a0a9507dea1d07dd1d182d122ca6..c58735f4c94adb9a324244aee016051af8889ae1 100644 --- a/drivers/infiniband/sw/rdmavt/rc.c +++ b/drivers/infiniband/sw/rdmavt/rc.c @@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp) * not atomic, which is OK, since the fuzziness is * resolved as further ACKs go out. */ - credits = head - tail; - if ((int)credits < 0) - credits += qp->r_rq.size; + credits = rvt_get_rq_count(&qp->r_rq, head, tail); } /* * Binary search the credit table to find the code to diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index a0b8cc643c5cfcf6342f8b8f170f2ee42f8196f4..ed60c9e4643e84b16cee9be592b440e845cba130 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name) static int dev_id = 1; int rv; + sdev->vendor_part_id = dev_id++; + rv = ib_register_device(base_dev, name); if (rv) { pr_warn("siw: device registration error %d\n", rv); return rv; } - sdev->vendor_part_id = dev_id++; siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 3f9354baac4b3e4a2c09176ea59c54f99bd9436c..6291fb5fa015a47ab25ed2dcc133acf8358c8d17 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) u8 hover_info = packet[ETP_HOVER_INFO_OFFSET]; bool contact_valid, hover_event; + pm_wakeup_event(&data->client->dev, 0); + hover_event = hover_info & 0x40; for (i = 0; i < ETP_MAX_FINGERS; i++) { contact_valid = tp_info & (1U << (3 + i)); @@ -974,6 +976,8 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1]; int x, y; + pm_wakeup_event(&data->client->dev, 0); + if (!data->tp_input) { dev_warn_once(&data->client->dev, "received a trackpoint report while no trackpoint device has been created. 
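/*
 * Stand-alone illustration: rvt_get_rq_count(), used above in place of
 * the removed get_count() and the open-coded credit math, is the usual
 * occupancy formula for a circular buffer - head minus tail, corrected
 * by the ring size when the head has wrapped.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int ring_count(unsigned int head, unsigned int tail,
                               unsigned int size)
{
        int count = (int)head - (int)tail;

        if (count < 0)
                count += size;  /* head wrapped past the end of the ring */
        return (unsigned int)count;
}

int main(void)
{
        assert(ring_count(5, 2, 8) == 3);       /* no wrap */
        assert(ring_count(1, 6, 8) == 3);       /* wrapped */
        printf("ring occupancy checks passed\n");
        return 0;
}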
Please report upstream.\n"); @@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) static irqreturn_t elan_isr(int irq, void *dev_id) { struct elan_tp_data *data = dev_id; - struct device *dev = &data->client->dev; int error; u8 report[ETP_MAX_REPORT_LEN]; @@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id) if (error) goto out; - pm_wakeup_event(dev, 0); - switch (report[ETP_REPORT_ID_OFFSET]) { case ETP_REPORT_ID: elan_report_absolute(data, report); @@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id) elan_report_trackpoint(data, report); break; default: - dev_err(dev, "invalid report id data (%x)\n", + dev_err(&data->client->dev, "invalid report id data (%x)\n", report[ETP_REPORT_ID_OFFSET]); } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 758dae8d650066006189ef429c589ddedf87bf27..4b81b2d0fe067af3a97da105b529d573ace25c1a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = { "LEN0093", /* T480 */ "LEN0096", /* X280 */ "LEN0097", /* X280 -> ALPS trackpoint */ + "LEN0099", /* X1 Extreme 1st */ "LEN009b", /* T580 */ "LEN200f", /* T450s */ "LEN2044", /* L470 */ diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 7b08ff8ddf35708391904f62a0917b5d62f5444c..7d7f737027264eb8cf33c45e815c155de3149ea6 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -425,6 +425,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), }, }, + { + /* Lenovo XiaoXin Air 12 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "80UN"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 233cb1085bbdd0f93469e2b405bb109f189d39ca..5477a5718202ac425c64ac9f3cdfc64f7ff89036 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1325,7 +1325,6 @@ static int elants_i2c_probe(struct i2c_client *client, 0, MT_TOOL_PALM, 0, 0); input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); - input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); touchscreen_parse_properties(ts->input, true, &ts->prop); diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index e5f9987445011a26614fc1f1cba1d17167f65c01..9e1ab701785c72d37ba0adac1d4827c995a770c4 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -243,6 +243,7 @@ static int aggregate_requests(struct icc_node *node) { struct icc_provider *p = node->provider; struct icc_req *r; + u32 avg_bw, peak_bw; node->avg_bw = 0; node->peak_bw = 0; @@ -251,9 +252,14 @@ static int aggregate_requests(struct icc_node *node) p->pre_aggregate(node); hlist_for_each_entry(r, &node->req_list, req_node) { - if (!r->enabled) - continue; - p->aggregate(node, r->tag, r->avg_bw, r->peak_bw, + if (r->enabled) { + avg_bw = r->avg_bw; + peak_bw = r->peak_bw; + } else { + avg_bw = 0; + peak_bw = 0; + } + p->aggregate(node, r->tag, avg_bw, peak_bw, &node->avg_bw, &node->peak_bw); } diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c index e94f3c5228b7cc3aac31218c7fa923dc28aa9a00..42c6c558166265438f80e962bd1f4a18ef41840d 100644 --- 
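/*
 * Stand-alone illustration (structures invented for the example): the
 * aggregate_requests() change above keeps disabled requests in the loop
 * but feeds them to the provider as zero bandwidth.  A minimal
 * aggregator using the common "sum the average, take the maximum peak"
 * combination:
 */
#include <stdio.h>

struct example_req {
        unsigned int avg_bw;
        unsigned int peak_bw;
        int enabled;
};

static void aggregate(const struct example_req *reqs, unsigned int n,
                      unsigned int *agg_avg, unsigned int *agg_peak)
{
        unsigned int i;

        *agg_avg = 0;
        *agg_peak = 0;

        for (i = 0; i < n; i++) {
                unsigned int avg = reqs[i].enabled ? reqs[i].avg_bw : 0;
                unsigned int peak = reqs[i].enabled ? reqs[i].peak_bw : 0;

                *agg_avg += avg;
                if (peak > *agg_peak)
                        *agg_peak = peak;
        }
}

int main(void)
{
        struct example_req reqs[] = {
                { .avg_bw = 100, .peak_bw = 200, .enabled = 1 },
                { .avg_bw = 300, .peak_bw = 800, .enabled = 0 }, /* counts as 0 */
        };
        unsigned int avg, peak;

        aggregate(reqs, 2, &avg, &peak);
        printf("avg=%u peak=%u\n", avg, peak);  /* avg=100 peak=200 */
        return 0;
}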
a/drivers/interconnect/qcom/msm8916.c +++ b/drivers/interconnect/qcom/msm8916.c @@ -197,13 +197,13 @@ DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS, DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0); DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS); -DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); -DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); -DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); -DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); -DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); -DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); -DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); +DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS); +DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG); +DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB); +DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG); +DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG); +DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1); +DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG); DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV); DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1); DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 6dc49ed8377a5c12bfb135c89021b9f3b48747ce..b0f308cb7f7c2fc2af592b63b38ef5762f9c9a83 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -305,6 +305,7 @@ config ROCKCHIP_IOMMU config SUN50I_IOMMU bool "Allwinner H6 IOMMU Support" + depends on HAS_DMA depends on ARCH_SUNXI || COMPILE_TEST select ARM_DMA_USE_IOMMU select IOMMU_API diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index f892992c8744dfe28d3216d41d68bbcfd553bfcd..57309716fd180adfbb609db7e195e7a036c3acd6 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -102,7 +102,7 @@ extern int 
__init add_special_device(u8 type, u8 id, u16 *devid, #ifdef CONFIG_DMI void amd_iommu_apply_ivrs_quirks(void); #else -static void amd_iommu_apply_ivrs_quirks(void) { } +static inline void amd_iommu_apply_ivrs_quirks(void) { } #endif #endif diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 74cca17571725d47cb00923c2fb391de7b9c7d2e..2f22326ee4dfe87887c2b2ed63b9f8a8245d76c5 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3985,9 +3985,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu) if (!fn) return -ENOMEM; iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); - if (!iommu->ir_domain) + if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); return -ENOMEM; + } iommu->ir_domain->parent = arch_get_ir_parent_domain(); iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c index cf01d0215a3974c3c2a912eef8183911e5e82b64..be4318044f96c19d1055a07231f87dc911f8ebe5 100644 --- a/drivers/iommu/arm-smmu-qcom.c +++ b/drivers/iommu/arm-smmu-qcom.c @@ -12,7 +12,7 @@ struct qcom_smmu { struct arm_smmu_device smmu; }; -static const struct of_device_id qcom_smmu_client_of_match[] = { +static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,adreno" }, { .compatible = "qcom,mdp4" }, { .compatible = "qcom,mdss" }, diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index 3c0c67a99c7b6407a589d927af15dd4be74e4a02..8919c1c70b68ac8b6007604c689fb3ba746b073f 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -155,7 +155,10 @@ static int __init hyperv_prepare_irq_remapping(void) 0, IOAPIC_REMAPPING_ENTRY, fn, &hyperv_ir_domain_ops, NULL); - irq_domain_free_fwnode(fn); + if (!ioapic_ir_domain) { + irq_domain_free_fwnode(fn); + return -ENOMEM; + } /* * Hyper-V doesn't provide irq remapping function for diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index 7f87698008150298b1e197c3304ce0b23f966a56..9564d23d094f05d36ee349bff704b31cce5af3e4 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -563,8 +563,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) 0, INTR_REMAP_TABLE_ENTRIES, fn, &intel_ir_domain_ops, iommu); - irq_domain_free_fwnode(fn); if (!iommu->ir_domain) { + irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); goto out_free_bitmap; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index d43120eb1dc56ef9d83338339d71f53c235db906..b6858adc4f173db6f2a2693e143b939ced819d88 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -295,10 +295,10 @@ void iommu_release_device(struct device *dev) return; iommu_device_unlink(dev->iommu->iommu_dev, dev); - iommu_group_remove_device(dev); ops->release_device(dev); + iommu_group_remove_device(dev); module_put(ops->owner); dev_iommu_free(dev); } diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index c3e1fbd1988cf2bd51d0947313335cba44e3047b..d176df569af8fae92c7fd26628d3a4d6c480c463 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -65,6 +65,7 @@ struct qcom_iommu_domain { struct mutex init_mutex; /* Protects iommu pointer */ struct iommu_domain domain; struct qcom_iommu_dev *iommu; + struct iommu_fwspec *fwspec; }; static struct qcom_iommu_domain *to_qcom_iommu_domain(struct 
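/*
 * Illustrative sketch, not from the drivers: the AMD, Hyper-V and Intel
 * IOMMU hunks above all fix the same pattern - an irqdomain keeps using
 * the fwnode it was created with, so the fwnode may only be freed when
 * domain creation failed.  The size and ops below are placeholders.
 */
#include <linux/irqdomain.h>

static struct irq_domain *example_create_domain(const struct irq_domain_ops *ops,
                                                void *host_data)
{
        struct fwnode_handle *fn;
        struct irq_domain *domain;

        fn = irq_domain_alloc_named_fwnode("example-ir");
        if (!fn)
                return NULL;

        domain = irq_domain_create_linear(fn, 32, ops, host_data);
        if (!domain) {
                /* Nothing references the fwnode yet, so it can go. */
                irq_domain_free_fwnode(fn);
                return NULL;
        }

        return domain;
}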
iommu_domain *dom) @@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev) return dev_iommu_priv_get(dev); } -static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid) +static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid) { - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); + struct qcom_iommu_dev *qcom_iommu = d->iommu; if (!qcom_iommu) return NULL; return qcom_iommu->ctxs[asid - 1]; @@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg) static void qcom_iommu_tlb_sync(void *cookie) { - struct iommu_fwspec *fwspec; - struct device *dev = cookie; + struct qcom_iommu_domain *qcom_domain = cookie; + struct iommu_fwspec *fwspec = qcom_domain->fwspec; unsigned i; - fwspec = dev_iommu_fwspec_get(dev); - for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); unsigned int val, ret; iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0); @@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie) static void qcom_iommu_tlb_inv_context(void *cookie) { - struct device *dev = cookie; - struct iommu_fwspec *fwspec; + struct qcom_iommu_domain *qcom_domain = cookie; + struct iommu_fwspec *fwspec = qcom_domain->fwspec; unsigned i; - fwspec = dev_iommu_fwspec_get(dev); - for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid); } @@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie) static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) { - struct device *dev = cookie; - struct iommu_fwspec *fwspec; + struct qcom_iommu_domain *qcom_domain = cookie; + struct iommu_fwspec *fwspec = qcom_domain->fwspec; unsigned i, reg; reg = leaf ? 
ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; - fwspec = dev_iommu_fwspec_get(dev); - for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); size_t s = size; iova = (iova >> 12) << 12; @@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, }; qcom_domain->iommu = qcom_iommu; - pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev); + qcom_domain->fwspec = fwspec; + + pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain); if (!pgtbl_ops) { dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); ret = -ENOMEM; @@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, domain->geometry.force_aperture = true; for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); if (!ctx->secure_init) { ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); @@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de pm_runtime_get_sync(qcom_iommu->dev); for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]); /* Disable the context bank: */ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c index fce605e96aa2451506d3c70584ea0b6716e5f4a7..3b1bf2fb94f59524ed55053aa915c8985b98a111 100644 --- a/drivers/iommu/sun50i-iommu.c +++ b/drivers/iommu/sun50i-iommu.c @@ -313,9 +313,9 @@ static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu) IOMMU_TLB_FLUSH_MICRO_TLB(1) | IOMMU_TLB_FLUSH_MICRO_TLB(0)); - ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG, - reg, !reg, - 1, 2000); + ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG, + reg, !reg, + 1, 2000); if (ret) dev_warn(iommu->dev, "TLB Flush timed out!\n"); @@ -556,7 +556,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova { struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain); phys_addr_t pt_phys; - dma_addr_t pte_dma; u32 *pte_addr; u32 dte; @@ -566,7 +565,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova pt_phys = sun50i_dte_get_pt_address(dte); pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova); - pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE; if (!sun50i_pte_is_page_valid(*pte_addr)) return 0; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 81dc5ff0890956545c97c8f2f4c176a8c650c0ca..a83a1de1e03fa8edfb04f4081429342f1b79a8f8 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2420,7 +2420,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) + if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) return; spin_lock_irq(&ic->endio_wait.lock); @@ -2481,7 +2481,7 @@ static void integrity_recalc(struct work_struct *w) next_chunk: - if (unlikely(dm_suspended(ic->ti))) + if (unlikely(dm_post_suspending(ic->ti))) goto unlock_ret; range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 
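/*
 * Illustrative sketch, not from the driver: the sun50i change above
 * switches to the *_atomic poll helper because the TLB flush is issued
 * with a spinlock held, where sleeping poll loops are not allowed.  The
 * register pointer below is a placeholder.
 */
#include <linux/iopoll.h>

static int example_wait_flush_done(void __iomem *flush_reg)
{
        u32 reg;

        /* Busy-wait, without sleeping, for the hardware to clear the bit. */
        return readl_poll_timeout_atomic(flush_reg, reg, !reg, 1, 2000);
}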
f60c025121215bf6fa39f31c32858e34764dc1dd..85e0daabad49cf4b445120bbf96d504af8b51532 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) */ static void rq_completed(struct mapped_device *md) { - /* nudge anyone waiting on suspend queue */ - if (unlikely(wq_has_sleeper(&md->wait))) - wake_up(&md->wait); - /* * dm_put() must be at the end of this function. See the comment above */ diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 30505d70f42386bfa963c039c4b9983152f66019..5358894bb9fdc155348013b7f0fa6f90f7bdfbd2 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -2266,6 +2266,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) } if (WC_MODE_PMEM(wc)) { + if (!dax_synchronous(wc->ssd_dev->dax_dev)) { + r = -EOPNOTSUPP; + ti->error = "Asynchronous persistent memory not supported as pmem cache"; + goto bad; + } + r = persistent_memory_claim(wc); if (r) { ti->error = "Unable to map persistent memory for cache"; diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 5cf6f5f552e047498fd2517424adb2ab86709b8b..b298fefb022eb9062025b753d53e71fc42d73b74 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -2217,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx, { struct list_head *list; struct dm_zone *zone; - int i = 0; + int i; + + /* Schedule reclaim to ensure free zones are available */ + if (!(flags & DMZ_ALLOC_RECLAIM)) { + for (i = 0; i < zmd->nr_devs; i++) + dmz_schedule_reclaim(zmd->dev[i].reclaim); + } + i = 0; again: if (flags & DMZ_ALLOC_CACHE) list = &zmd->unmap_cache_list; diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index dd1eebf6e50f11a3b3dd6b11f55471293341ff84..9c0ecc9568a420ba02784160a0d951226cab6b18 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c @@ -456,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc) nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx); nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx); } + if (nr_unmap <= 1) + return 0; return nr_unmap * 100 / nr_zones; } @@ -501,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work) { struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work); struct dmz_metadata *zmd = zrc->metadata; - unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0; + unsigned int p_unmap; int ret; if (dmz_dev_is_dying(zmd)) @@ -527,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work) zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2); } - nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx); - nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx); - DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)", dmz_metadata_label(zmd), zrc->dev_idx, zrc->kc_throttle.throttle, diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index cf915009c306a8226945c501c53618235bf4e73c..42aa5139df7c791c01c008b2e32baa4c114ad983 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw, dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); struct dmz_metadata *zmd = dmz->metadata; struct dm_zone *zone; - int i, ret; - - /* - * Write may trigger a zone allocation. So make sure the - * allocation can succeed. 
- */ - if (bio_op(bio) == REQ_OP_WRITE) - for (i = 0; i < dmz->nr_ddevs; i++) - dmz_schedule_reclaim(dmz->dev[i].reclaim); + int ret; dmz_lock_metadata(zmd); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e6807792fec831673781289992ddf90cef0f675e..5b9de2f71bb076e53a40913c18ea8d4675938570 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -142,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define DMF_NOFLUSH_SUSPENDING 5 #define DMF_DEFERRED_REMOVE 6 #define DMF_SUSPENDED_INTERNALLY 7 +#define DMF_POST_SUSPENDING 8 #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; @@ -654,28 +656,6 @@ static void free_tio(struct dm_target_io *tio) bio_put(&tio->clone); } -static bool md_in_flight_bios(struct mapped_device *md) -{ - int cpu; - struct hd_struct *part = &dm_disk(md)->part0; - long sum = 0; - - for_each_possible_cpu(cpu) { - sum += part_stat_local_read_cpu(part, in_flight[0], cpu); - sum += part_stat_local_read_cpu(part, in_flight[1], cpu); - } - - return sum != 0; -} - -static bool md_in_flight(struct mapped_device *md) -{ - if (queue_is_mq(md->queue)) - return blk_mq_queue_inflight(md->queue); - else - return md_in_flight_bios(md); -} - u64 dm_start_time_ns_from_clone(struct bio *bio) { struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); @@ -1465,9 +1445,6 @@ static int __send_empty_flush(struct clone_info *ci) BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); - - bio_disassociate_blkg(ci->bio); - return 0; } @@ -1655,6 +1632,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else if (op_is_zone_mgmt(bio_op(bio))) { ci.bio = bio; @@ -1729,6 +1707,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, ci.bio = &flush_bio; ci.sector_count = 0; error = __send_empty_flush(&ci); + bio_uninit(ci.bio); /* dec_pending submits any data associated with flush */ } else { struct dm_target_io *tio; @@ -2430,6 +2409,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); set_bit(DMF_SUSPENDED, &md->flags); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); } /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ @@ -2470,15 +2450,29 @@ void dm_put(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_put); -static int dm_wait_for_completion(struct mapped_device *md, long task_state) +static bool md_in_flight_bios(struct mapped_device *md) +{ + int cpu; + struct hd_struct *part = &dm_disk(md)->part0; + long sum = 0; + + for_each_possible_cpu(cpu) { + sum += part_stat_local_read_cpu(part, in_flight[0], cpu); + sum += part_stat_local_read_cpu(part, in_flight[1], cpu); + } + + return sum != 0; +} + +static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) { int r = 0; DEFINE_WAIT(wait); - while (1) { + while (true) { prepare_to_wait(&md->wait, &wait, task_state); - if (!md_in_flight(md)) + if (!md_in_flight_bios(md)) break; if (signal_pending_state(task_state, current)) { @@ -2493,6 +2487,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state) return r; } +static int dm_wait_for_completion(struct 
mapped_device *md, long task_state) +{ + int r = 0; + + if (!queue_is_mq(md->queue)) + return dm_wait_for_bios_completion(md, task_state); + + while (true) { + if (!blk_mq_queue_inflight(md->queue)) + break; + + if (signal_pending_state(task_state, current)) { + r = -EINTR; + break; + } + + msleep(5); + } + + return r; +} + /* * Process the deferred bios */ @@ -2752,7 +2768,9 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) if (r) goto out_unlock; + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); out_unlock: mutex_unlock(&md->suspend_lock); @@ -2849,7 +2867,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, DMF_SUSPENDED_INTERNALLY); + set_bit(DMF_POST_SUSPENDING, &md->flags); dm_table_postsuspend_targets(map); + clear_bit(DMF_POST_SUSPENDING, &md->flags); } static void __dm_internal_resume(struct mapped_device *md) @@ -2926,17 +2946,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, unsigned cookie) { + int r; + unsigned noio_flag; char udev_cookie[DM_COOKIE_LENGTH]; char *envp[] = { udev_cookie, NULL }; + noio_flag = memalloc_noio_save(); + if (!cookie) - return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); + r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); else { snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u", DM_COOKIE_ENV_VAR_NAME, cookie); - return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, - action, envp); + r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, + action, envp); } + + memalloc_noio_restore(noio_flag); + + return r; } uint32_t dm_next_uevent_seq(struct mapped_device *md) @@ -3002,6 +3030,11 @@ int dm_suspended_md(struct mapped_device *md) return test_bit(DMF_SUSPENDED, &md->flags); } +static int dm_post_suspending_md(struct mapped_device *md) +{ + return test_bit(DMF_POST_SUSPENDING, &md->flags); +} + int dm_suspended_internally_md(struct mapped_device *md) { return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); @@ -3018,6 +3051,12 @@ int dm_suspended(struct dm_target *ti) } EXPORT_SYMBOL_GPL(dm_suspended); +int dm_post_suspending(struct dm_target *ti) +{ + return dm_post_suspending_md(dm_table_get_md(ti->table)); +} +EXPORT_SYMBOL_GPL(dm_post_suspending); + int dm_noflush_suspending(struct dm_target *ti) { return __noflush_suspending(dm_table_get_md(ti->table)); diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c index 02998d4eb74b037b40f3dad5158b0dbe5ce5ae9c..74cee7cb0afc994306cf4462d7ed289121b80c1e 100644 --- a/drivers/mfd/ioc3.c +++ b/drivers/mfd/ioc3.c @@ -142,10 +142,11 @@ static int ioc3_irq_domain_setup(struct ioc3_priv_data *ipd, int irq) goto err; domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd); - if (!domain) + if (!domain) { + irq_domain_free_fwnode(fn); goto err; + } - irq_domain_free_fwnode(fn); ipd->domain = domain; irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain); diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index ab4144ea1f116940483a165555bb09176d6e9738..d6cd5537126c69bc76b5c019b17d0b01bb4a7eab 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include @@ -20,7 +20,7 @@ #include "../../sound/soc/atmel/atmel_ssc_dai.h" /* Serialize access to ssc_list and user count */ -static DEFINE_SPINLOCK(user_lock); 
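The dm_post_suspending() helper exported in the dm.c hunk above lets a target detect the postsuspend window that the new DMF_POST_SUSPENDING flag marks; dm-integrity switches its writer and recalc workers to it earlier in this series. Below is a minimal sketch of how a target's deferred work could use it. Only dm_post_suspending() and dm_suspended() are real DM core calls here; the target structure and worker are hypothetical, and the declaration is assumed to live in linux/device-mapper.h next to dm_suspended().

/*
 * Hypothetical target: skip background I/O once postsuspend has started.
 * dm_post_suspending() and dm_suspended() are the DM core helpers from the
 * hunks above; everything named "example_*" is made up for illustration.
 */
#include <linux/device-mapper.h>
#include <linux/workqueue.h>

struct example_target {
	struct dm_target *ti;
	struct work_struct flush_work;
};

static void example_flush_work(struct work_struct *work)
{
	struct example_target *et =
		container_of(work, struct example_target, flush_work);

	/* Mirrors the dm-integrity change: issue no new I/O once the target
	 * is post-suspending or already suspended.
	 */
	if (dm_post_suspending(et->ti) || dm_suspended(et->ti))
		return;

	/* ... issue deferred metadata/flush I/O here ... */
}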
+static DEFINE_MUTEX(user_lock); static LIST_HEAD(ssc_list); struct ssc_device *ssc_request(unsigned int ssc_num) @@ -28,7 +28,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num) int ssc_valid = 0; struct ssc_device *ssc; - spin_lock(&user_lock); + mutex_lock(&user_lock); list_for_each_entry(ssc, &ssc_list, list) { if (ssc->pdev->dev.of_node) { if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc") @@ -44,18 +44,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num) } if (!ssc_valid) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); pr_err("ssc: ssc%d platform device is missing\n", ssc_num); return ERR_PTR(-ENODEV); } if (ssc->user) { - spin_unlock(&user_lock); + mutex_unlock(&user_lock); dev_dbg(&ssc->pdev->dev, "module busy\n"); return ERR_PTR(-EBUSY); } ssc->user++; - spin_unlock(&user_lock); + mutex_unlock(&user_lock); clk_prepare(ssc->clk); @@ -67,14 +67,14 @@ void ssc_free(struct ssc_device *ssc) { bool disable_clk = true; - spin_lock(&user_lock); + mutex_lock(&user_lock); if (ssc->user) ssc->user--; else { disable_clk = false; dev_dbg(&ssc->pdev->dev, "device already free\n"); } - spin_unlock(&user_lock); + mutex_unlock(&user_lock); if (disable_clk) clk_unprepare(ssc->clk); @@ -237,9 +237,9 @@ static int ssc_probe(struct platform_device *pdev) return -ENXIO; } - spin_lock(&user_lock); + mutex_lock(&user_lock); list_add_tail(&ssc->list, &ssc_list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); platform_set_drvdata(pdev, ssc); @@ -258,9 +258,9 @@ static int ssc_remove(struct platform_device *pdev) ssc_sound_dai_remove(ssc); - spin_lock(&user_lock); + mutex_lock(&user_lock); list_del(&ssc->list); - spin_unlock(&user_lock); + mutex_unlock(&user_lock); return 0; } diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index b0f62cbbdc878d945827ffd4063aa956dc2d56a1..f3a8f113865d2b06fc1e587c06da7212756c9000 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -499,11 +499,19 @@ static int validate_queue_index(struct hl_device *hdev, struct asic_fixed_properties *asic = &hdev->asic_prop; struct hw_queue_properties *hw_queue_prop; + /* This must be checked here to prevent out-of-bounds access to + * hw_queues_props array + */ + if (chunk->queue_index >= HL_MAX_QUEUES) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + return -EINVAL; + } + hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; - if ((chunk->queue_index >= HL_MAX_QUEUES) || - (hw_queue_prop->type == QUEUE_TYPE_NA)) { - dev_err(hdev->dev, "Queue index %d is invalid\n", + if (hw_queue_prop->type == QUEUE_TYPE_NA) { + dev_err(hdev->dev, "Queue index %d is not applicable\n", chunk->queue_index); return -EINVAL; } diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c index fc4372c18ce20798317e4022bd119001201f5a55..0bc036e01ee8df1bfbfa8fccd8f41ba1890a4b3d 100644 --- a/drivers/misc/habanalabs/debugfs.c +++ b/drivers/misc/habanalabs/debugfs.c @@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_reg = i2c_reg; rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, (long *) val); + 0, (long *) val); if (rc) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); @@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.value = cpu_to_le64(val); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) 
&pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc); @@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state) pkt.value = cpu_to_le64(state); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc); @@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf, if (*ppos) return 0; - sprintf(tmp_buf, "%d\n", hdev->clock_gating); + sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask); rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, strlen(tmp_buf) + 1); @@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, { struct hl_dbg_device_entry *entry = file_inode(f)->i_private; struct hl_device *hdev = entry->hdev; - u32 value; + u64 value; ssize_t rc; if (atomic_read(&hdev->in_reset)) { @@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf, return 0; } - rc = kstrtouint_from_user(buf, count, 10, &value); + rc = kstrtoull_from_user(buf, count, 16, &value); if (rc) return rc; - if (value) { - hdev->clock_gating = 1; - if (hdev->asic_funcs->enable_clock_gating) - hdev->asic_funcs->enable_clock_gating(hdev); - } else { - if (hdev->asic_funcs->disable_clock_gating) - hdev->asic_funcs->disable_clock_gating(hdev); - hdev->clock_gating = 0; - } + hdev->clock_gating_mask = value; + hdev->asic_funcs->set_clock_gating(hdev); return count; } diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 2b38a119704c4ae2a186a85b9c1c885f31aa9069..59608d1bac880e6795b01809d098ea3b8be592c0 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -608,7 +608,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable) hdev->in_debug = 0; if (!hdev->hard_reset_pending) - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); goto out; } diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c index baf790cf4b787fb4f2164fc443096e58c30c763b..d27841cb5bcb3dc2ef56b3a69e687290bc328626 100644 --- a/drivers/misc/habanalabs/firmware_if.c +++ b/drivers/misc/habanalabs/firmware_if.c @@ -61,7 +61,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode) pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT); return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, - sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL); + sizeof(pkt), 0, NULL); } int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, @@ -144,7 +144,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type) pkt.value = cpu_to_le64(event_type); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, &result); + 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); @@ -183,7 +183,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, ARMCP_PKT_CTL_OPCODE_SHIFT); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, - total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result); + total_pkt_size, 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask IRQ array\n"); @@ -204,7 +204,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev) test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 
*) &test_pkt, - sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + sizeof(test_pkt), 0, &result); if (!rc) { if (result != ARMCP_PACKET_FENCE_VAL) @@ -248,7 +248,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev) hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, - sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + sizeof(hb_pkt), 0, &result); if ((rc) || (result != ARMCP_PACKET_FENCE_VAL)) rc = -EIO; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 834470d10b46bb5e800517dc0724adb34686e5df..637a9d608707f51e30226e121fad16019659a2bb 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -80,6 +80,7 @@ #define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ #define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9 @@ -98,6 +99,11 @@ #define GAUDI_ARB_WDT_TIMEOUT 0x1000000 +#define GAUDI_CLK_GATE_DEBUGFS_MASK (\ + BIT(GAUDI_ENGINE_ID_MME_0) |\ + BIT(GAUDI_ENGINE_ID_MME_2) |\ + GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0)) + static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", @@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { }; static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { - [GAUDI_PCI_DMA_1] = 0, - [GAUDI_PCI_DMA_2] = 1, - [GAUDI_PCI_DMA_3] = 5, - [GAUDI_HBM_DMA_1] = 2, - [GAUDI_HBM_DMA_2] = 3, - [GAUDI_HBM_DMA_3] = 4, - [GAUDI_HBM_DMA_4] = 6, - [GAUDI_HBM_DMA_5] = 7 + [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, + [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, + [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5, + [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2, + [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3, + [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4, + [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6, + [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7 }; static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { @@ -1819,7 +1825,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev) gaudi_init_rate_limiter(hdev); - gaudi_disable_clock_gating(hdev); + hdev->asic_funcs->disable_clock_gating(hdev); for (tpc_id = 0, tpc_offset = 0; tpc_id < TPC_NUMBER_OF_ENGINES; @@ -2531,46 +2537,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev) WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); } -static void gaudi_enable_clock_gating(struct hl_device *hdev) +static void gaudi_set_clock_gating(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; u32 qman_offset; int i; - if (!hdev->clock_gating) - return; - - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) - return; - /* In case we are during debug session, don't enable the clock gate * as it may interfere */ if (hdev->in_debug) return; - for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) { + for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i])))) + continue; + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, QMAN_UPPER_CP_CGM_PWR_GATE_EN); } - for (; i < 
HBM_DMA_NUMBER_OF_CHNLS ; i++) { + for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(gaudi_dma_assignment[i])))) + continue; + qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET; WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmDMA0_QM_CGM_CFG + qman_offset, QMAN_COMMON_CP_CGM_PWR_GATE_EN); } - WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - WREG32(mmMME0_QM_CGM_CFG, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); - WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); - WREG32(mmMME2_QM_CGM_CFG, - QMAN_COMMON_CP_CGM_PWR_GATE_EN); + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) { + WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); + } + + if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) { + WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN); + WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN); + } for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) { + if (!(hdev->clock_gating_mask & + (BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i)))) + continue; + WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN); WREG32(mmTPC0_QM_CGM_CFG + qman_offset, @@ -2663,7 +2678,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) gaudi_stop_hbm_dma_qmans(hdev); gaudi_stop_pci_dma_qmans(hdev); - gaudi_disable_clock_gating(hdev); + hdev->asic_funcs->disable_clock_gating(hdev); msleep(wait_timeout_ms); @@ -3003,7 +3018,7 @@ static int gaudi_hw_init(struct hl_device *hdev) gaudi_init_tpc_qmans(hdev); - gaudi_enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); gaudi_enable_timestamp(hdev); @@ -3112,7 +3127,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) HW_CAP_HBM_DMA | HW_CAP_PLL | HW_CAP_MMU | HW_CAP_SRAM_SCRAMBLER | - HW_CAP_HBM_SCRAMBLER); + HW_CAP_HBM_SCRAMBLER | + HW_CAP_CLK_GATE); + memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); } @@ -3463,6 +3480,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg, return 0; } + if (!timeout) + timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC; + return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len, timeout, result); } @@ -3865,6 +3885,12 @@ static int gaudi_validate_cb(struct hl_device *hdev, rc = -EPERM; break; + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + case PACKET_LOAD_AND_EXE: rc = gaudi_validate_load_and_exe_pkt(hdev, parser, (struct packet_load_and_exe *) user_pkt); @@ -3880,7 +3906,6 @@ static int gaudi_validate_cb(struct hl_device *hdev, break; case PACKET_WREG_32: - case PACKET_WREG_BULK: case PACKET_MSG_LONG: case PACKET_MSG_SHORT: case PACKET_REPEAT: @@ -4521,13 +4546,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) int rc = 0; if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't read register - clock gating is enabled!\n"); rc = -EFAULT; } else { *val = RREG32(addr - CFG_BASE); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { *val = readl(hdev->pcie_bar[SRAM_BAR_ID] + @@ -4563,13 +4593,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) int rc = 0; if ((addr >= CFG_BASE) && (addr < CFG_BASE + 
CFG_SIZE)) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't write register - clock gating is enabled!\n"); rc = -EFAULT; } else { WREG32(addr - CFG_BASE, val); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) { writel(val, hdev->pcie_bar[SRAM_BAR_ID] + @@ -4605,7 +4640,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) int rc = 0; if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't read register - clock gating is enabled!\n"); rc = -EFAULT; @@ -4615,6 +4654,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val) *val = (((u64) val_h) << 32) | val_l; } + } else if ((addr >= SRAM_BASE_ADDR) && (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { *val = readq(hdev->pcie_bar[SRAM_BAR_ID] + @@ -4651,7 +4691,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) int rc = 0; if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) { - if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) { + + if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) && + (hdev->clock_gating_mask & + GAUDI_CLK_GATE_DEBUGFS_MASK)) { + dev_err_ratelimited(hdev->dev, "Can't write register - clock gating is enabled!\n"); rc = -EFAULT; @@ -4660,6 +4704,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val) WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val)); } + } else if ((addr >= SRAM_BASE_ADDR) && (addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) { writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + @@ -4881,7 +4926,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid) gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid); gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); } @@ -5262,7 +5307,7 @@ static void gaudi_print_ecc_info_generic(struct hl_device *hdev, } if (disable_clock_gating) { - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); } } @@ -5749,7 +5794,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id, /* Clear interrupts */ WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -6265,7 +6310,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask, if (s) seq_puts(s, "\n"); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -6366,7 +6411,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, dev_err(hdev->dev, "Timeout while waiting for TPC%d icache prefetch\n", tpc_id); - hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); return -EIO; } @@ -6395,7 +6440,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, 1000, kernel_timeout); - 
hdev->asic_funcs->enable_clock_gating(hdev); + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); if (rc) { @@ -6736,7 +6781,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .mmu_invalidate_cache = gaudi_mmu_invalidate_cache, .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range, .send_heartbeat = gaudi_send_heartbeat, - .enable_clock_gating = gaudi_enable_clock_gating, + .set_clock_gating = gaudi_set_clock_gating, .disable_clock_gating = gaudi_disable_clock_gating, .debug_coresight = gaudi_debug_coresight, .is_device_idle = gaudi_is_device_idle, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 0d2952bb58dfb378ae341b76499f097fd4b49f24..88460b2138d88bfe80e10e41b349e7c9c4986df3 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -88,6 +88,7 @@ #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */ +#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */ #define GOYA_QMAN0_FENCE_VAL 0xD169B243 @@ -2830,6 +2831,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, return 0; } + if (!timeout) + timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC; + return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len, timeout, result); } @@ -4431,8 +4435,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << ARMCP_PKT_CTL_OPCODE_SHIFT); - rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size, - HL_DEVICE_TIMEOUT_USEC, &result); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, + total_pkt_size, 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask IRQ array\n"); @@ -4464,8 +4468,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) ARMCP_PKT_CTL_OPCODE_SHIFT); pkt.value = cpu_to_le64(event_type); - rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - HL_DEVICE_TIMEOUT_USEC, &result); + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + 0, &result); if (rc) dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); @@ -5028,14 +5032,14 @@ int goya_armcp_info_get(struct hl_device *hdev) return 0; } -static void goya_enable_clock_gating(struct hl_device *hdev) +static void goya_set_clock_gating(struct hl_device *hdev) { - + /* clock gating not supported in Goya */ } static void goya_disable_clock_gating(struct hl_device *hdev) { - + /* clock gating not supported in Goya */ } static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask, @@ -5259,7 +5263,7 @@ static const struct hl_asic_funcs goya_funcs = { .mmu_invalidate_cache = goya_mmu_invalidate_cache, .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, .send_heartbeat = goya_send_heartbeat, - .enable_clock_gating = goya_enable_clock_gating, + .set_clock_gating = goya_set_clock_gating, .disable_clock_gating = goya_disable_clock_gating, .debug_coresight = goya_debug_coresight, .is_device_idle = goya_is_device_idle, diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 1ecdcf8b763a24046acd97f900e90a27d9d78a35..194d8335269642e8edb2e3c25f858d8392335da0 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -578,8 +578,9 @@ enum hl_pll_frequency { * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with * 
ASID-VA-size mask. * @send_heartbeat: send is-alive packet to ArmCP and verify response. - * @enable_clock_gating: enable clock gating for reducing power consumption. - * @disable_clock_gating: disable clock for accessing registers on HBW. + * @set_clock_gating: enable/disable clock gating per engine according to + * the clock gating mask in hdev. + * @disable_clock_gating: disable clock gating completely. * @debug_coresight: perform certain actions on Coresight for debugging. * @is_device_idle: return true if device is idle, false otherwise. * @soft_reset_late_init: perform certain actions needed after soft reset. @@ -587,7 +588,11 @@ enum hl_pll_frequency { * @hw_queues_unlock: release H/W queues lock. * @get_pci_id: retrieve PCI ID. * @get_eeprom_data: retrieve EEPROM data from F/W. - * @send_cpu_message: send buffer to ArmCP. + * @send_cpu_message: send message to F/W. If the message times out, the + * driver will eventually reset the device. The timeout can + * be set by the calling function, or it can be 0, in which + * case the default timeout for the specific ASIC is + * used. * @get_hw_state: retrieve the H/W state * @pci_bars_map: Map PCI BARs. * @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns * @@ -680,7 +685,7 @@ struct hl_asic_funcs { int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, u32 asid, u64 va, u64 size); int (*send_heartbeat)(struct hl_device *hdev); - void (*enable_clock_gating)(struct hl_device *hdev); + void (*set_clock_gating)(struct hl_device *hdev); void (*disable_clock_gating)(struct hl_device *hdev); int (*debug_coresight)(struct hl_device *hdev, void *data); bool (*is_device_idle)(struct hl_device *hdev, u32 *mask, @@ -1398,6 +1403,9 @@ struct hl_device_idle_busy_ts { * @max_power: the max power of the device, as configured by the sysadmin. This * value is saved so in case of hard-reset, the driver will restore * this value and update the F/W after the re-initialization + * @clock_gating_mask: bitmask of the engines for which clock gating is + * enabled. See debugfs-driver-habanalabs for + * details. * @in_reset: is device in reset flow. * @curr_pll_profile: current PLL profile. * @cs_active_cnt: number of active command submissions on this device (active * @@ -1425,7 +1433,6 @@ struct hl_device_idle_busy_ts { * @init_done: is the initialization of the device done. * @mmu_enable: is MMU enabled. * @mmu_huge_page_opt: is MMU huge pages optimization enabled. - * @clock_gating: is clock gating enabled. * @device_cpu_disabled: is the device CPU disabled (due to timeouts) * @dma_mask: the dma mask that was set for this device * @in_debug: is device under debug.
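Since clock gating is now controlled by a per-engine bitmask rather than a single flag, a short sketch of how such a mask can be composed may help. It reuses only BIT_ULL()/GENMASK_ULL() and the GAUDI_ENGINE_ID_* values already used by GAUDI_CLK_GATE_DEBUGFS_MASK and gaudi_set_clock_gating() above; the helper itself and the uapi include path are assumptions for illustration.

/*
 * Illustrative only: build a clock_gating_mask covering the MME and TPC
 * engines, the same engines grouped by GAUDI_CLK_GATE_DEBUGFS_MASK above.
 * The helper name is hypothetical; the enum is assumed to come from the
 * uapi habanalabs header.
 */
#include <linux/types.h>
#include <linux/bits.h>
#include <uapi/misc/habanalabs.h>

static u64 example_clock_gating_mask(void)
{
	u64 mask = 0;

	mask |= BIT_ULL(GAUDI_ENGINE_ID_MME_0);
	mask |= BIT_ULL(GAUDI_ENGINE_ID_MME_2);
	mask |= GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0);

	/* Engines whose bit is clear keep clock gating disabled. */
	return mask;
}

The debugfs clk_gate write path in the hunks above parses this value as hexadecimal (kstrtoull_from_user with base 16), and the driver default set in habanalabs_drv.c later in this series is ULONG_MAX, i.e. clock gating enabled for every engine unless overridden.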
This, together with fpriv_list, enforces @@ -1493,6 +1500,7 @@ struct hl_device { atomic64_t dram_used_mem; u64 timeout_jiffies; u64 max_power; + u64 clock_gating_mask; atomic_t in_reset; enum hl_pll_frequency curr_pll_profile; int cs_active_cnt; @@ -1514,7 +1522,6 @@ struct hl_device { u8 dram_default_page_mapping; u8 pmmu_huge_range; u8 init_done; - u8 clock_gating; u8 device_cpu_disabled; u8 dma_mask; u8 in_debug; diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index 8652c7e5d7f1071b81069a742cd8cc93909d7c9c..22716da9f85fce6bb58634084797557c50f5d59a 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -232,7 +232,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev) hdev->fw_loading = 1; hdev->cpu_queues_enable = 1; hdev->heartbeat = 1; - hdev->clock_gating = 1; + hdev->clock_gating_mask = ULONG_MAX; hdev->reset_pcilink = 0; hdev->axi_drain = 0; diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c index 8c6cd77e6af6bd3b6ada7cd114867d9d528191a7..b997336fa75fc88ad7a565163697b789e977f4ca 100644 --- a/drivers/misc/habanalabs/hwmon.c +++ b/drivers/misc/habanalabs/hwmon.c @@ -10,7 +10,6 @@ #include #include -#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */ #define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1) int hl_build_hwmon_channel_info(struct hl_device *hdev, @@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -400,7 +399,7 @@ int hl_get_current(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, value); + 0, value); if (rc) { dev_err(hdev->dev, @@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, pkt.value = cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev, pkt.value = __cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 
SENSORS_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c index 5d78d5e1c7826163601cacdd12bac6a1a87e4e3e..70b6b1863c2ef3ee4042d57caef2672bda7d19c6 100644 --- a/drivers/misc/habanalabs/sysfs.c +++ b/drivers/misc/habanalabs/sysfs.c @@ -9,9 +9,6 @@ #include -#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */ -#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */ - long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) { struct armcp_packet pkt; @@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) pkt.pll_index = cpu_to_le32(pll_index); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_CLK_PKT_TIMEOUT, &result); + 0, &result); if (rc) { dev_err(hdev->dev, @@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) pkt.value = cpu_to_le64(freq); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_CLK_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, @@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev) ARMCP_PKT_CTL_OPCODE_SHIFT); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_PWR_PKT_TIMEOUT, &result); + 0, &result); if (rc) { dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); @@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value) pkt.value = cpu_to_le64(value); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - SET_PWR_PKT_TIMEOUT, NULL); + 0, NULL); if (rc) dev_err(hdev->dev, "Failed to set max power, error %d\n", rc); diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 8d468e0a950a6cd659d310b2855539e3a3d1699d..f476dbc7252b9d2e109daa088a82b270b56cb94d 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -745,9 +745,8 @@ static int mei_cl_device_remove(struct device *dev) mei_cl_bus_module_put(cldev); module_put(THIS_MODULE); - dev->driver = NULL; - return ret; + return ret; } static ssize_t name_show(struct device *dev, struct device_attribute *a, diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 7eb38d7482c6d97d6ef16024a13759ac7af1c635..08a3b1c05acb9741c422553c446df5eda9e76fa9 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1146,9 +1146,11 @@ static int meson_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_CMD23; if (host->dram_access_quirk) { + /* Limit segments to 1 due to low available sram memory */ + mmc->max_segs = 1; /* Limit to the available sram memory */ - mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size; - mmc->max_blk_count = mmc->max_segs; + mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN / + mmc->max_blk_size; } else { mmc->max_blk_count = CMD_CFG_LENGTH_MASK; mmc->max_segs = SD_EMMC_DESC_BUF_LEN / diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 5e20c099fe03a8d538dd3305fecbfd1136fc42cd..df43f42855e2e87bdfbe254a2640a49564bc5a83 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -689,7 +689,7 @@ MODULE_DEVICE_TABLE(of, owl_mmc_of_match); static struct platform_driver owl_mmc_driver = { .driver = { .name = "owl_mmc", - .of_match_table = of_match_ptr(owl_mmc_of_match), + .of_match_table = owl_mmc_of_match, }, .probe = owl_mmc_probe, .remove = owl_mmc_remove, diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 
b277dd7fbdb5d56e7520d36491e3981378b5a42b..c0d58e9fcc33365a746bc005f601021e5cb69989 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -618,8 +618,9 @@ static int msm_init_cm_dll(struct sdhci_host *host) config &= ~CORE_CLK_PWRSAVE; writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); - config = msm_host->dll_config; - writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config); + if (msm_host->dll_config) + writel_relaxed(msm_host->dll_config, + host->ioaddr + msm_offset->core_dll_config); if (msm_host->use_14lpp_dll_reset) { config = readl_relaxed(host->ioaddr + diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c index 56912e30c47ec64c7ce2050c42cfb91241497827..a1bcc0f4ba9e4e19e79c050a627c12a11493810d 100644 --- a/drivers/mmc/host/sdhci-of-aspeed.c +++ b/drivers/mmc/host/sdhci-of-aspeed.c @@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) if (WARN_ON(clock > host->max_clk)) clock = host->max_clk; - for (div = 1; div < 256; div *= 2) { + for (div = 2; div < 256; div *= 2) { if ((parent / div) <= clock) break; } diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 3dd46cd551145e74e80a136f7fac410ce2ffb430..88e7900853db900716602562a476a59a6b3c1a99 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -407,19 +407,34 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return err; } +static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto) +{ + if (bareudp->ethertype == proto) + return true; + + if (!bareudp->multi_proto_mode) + return false; + + if (bareudp->ethertype == htons(ETH_P_MPLS_UC) && + proto == htons(ETH_P_MPLS_MC)) + return true; + + if (bareudp->ethertype == htons(ETH_P_IP) && + proto == htons(ETH_P_IPV6)) + return true; + + return false; +} + static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) { struct bareudp_dev *bareudp = netdev_priv(dev); struct ip_tunnel_info *info = NULL; int err; - if (skb->protocol != bareudp->ethertype) { - if (!bareudp->multi_proto_mode || - (skb->protocol != htons(ETH_P_MPLS_MC) && - skb->protocol != htons(ETH_P_IPV6))) { - err = -EINVAL; - goto tx_error; - } + if (!bareudp_proto_valid(bareudp, skb->protocol)) { + err = -EINVAL; + goto tx_error; } info = skb_tunnel_info(skb); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 004919aea5fbf8e5fd44734dd7b89d5a7f8e9d5e..f88cb097b022ab1af55568875f9a4667a8988722 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5053,15 +5053,19 @@ int bond_create(struct net *net, const char *name) bond_dev->rtnl_link_ops = &bond_link_ops; res = register_netdevice(bond_dev); + if (res < 0) { + free_netdev(bond_dev); + rtnl_unlock(); + + return res; + } netif_carrier_off(bond_dev); bond_work_init_all(bond); rtnl_unlock(); - if (res < 0) - free_netdev(bond_dev); - return res; + return 0; } static int __net_init bond_net_init(struct net *net) diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index b43b51646b11a6b7b9a8284cbb48d3d456cfdd78..f0f9138e967f3a1f4be094641c10013b2dde55b0 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev, return err; err = register_netdevice(bond_dev); - - netif_carrier_off(bond_dev); if (!err) { struct bonding *bond = netdev_priv(bond_dev); + 
netif_carrier_off(bond_dev); bond_work_init_all(bond); } diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 47d65b77caf77415b730f0ce3ca3b62332e0b2e0..7c17b0f705ec37d626a5298b4b5c8150e5dc7bc3 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1268,6 +1268,9 @@ static int ksz8795_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 9a51b8a4de5d1432a58537dcda4654932e7cc2dc..4a9239b2c2e4ac4a42b1c73f94463b45d414e9bd 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -974,23 +974,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, PORT_MIRROR_SNIFFER, false); } -static void ksz9477_phy_setup(struct ksz_device *dev, int port, - struct phy_device *phy) -{ - /* Only apply to port with PHY. */ - if (port >= dev->phy_port_cnt) - return; - - /* The MAC actually cannot run in 1000 half-duplex mode. */ - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - /* PHY does not support gigabit. */ - if (!(dev->features & GBIT_SUPPORT)) - phy_remove_link_mode(phy, - ETHTOOL_LINK_MODE_1000baseT_Full_BIT); -} - static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data) { bool gbit; @@ -1588,6 +1571,9 @@ static int ksz9477_switch_init(struct ksz_device *dev) return -ENOMEM; } + /* set the real number of ports */ + dev->ds->num_ports = dev->port_cnt; + return 0; } @@ -1600,7 +1586,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .get_port_addr = ksz9477_get_port_addr, .cfg_port_member = ksz9477_cfg_port_member, .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, - .phy_setup = ksz9477_phy_setup, .port_setup = ksz9477_port_setup, .r_mib_cnt = ksz9477_r_mib_cnt, .r_mib_pkt = ksz9477_r_mib_pkt, @@ -1614,7 +1599,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { int ksz9477_switch_register(struct ksz_device *dev) { - return ksz_switch_register(dev, &ksz9477_dev_ops); + int ret, i; + struct phy_device *phydev; + + ret = ksz_switch_register(dev, &ksz9477_dev_ops); + if (ret) + return ret; + + for (i = 0; i < dev->phy_port_cnt; ++i) { + if (!dsa_is_user_port(dev->ds, i)) + continue; + + phydev = dsa_to_port(dev->ds, i)->slave->phydev; + + /* The MAC actually cannot run in 1000 half-duplex mode. */ + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* PHY does not support gigabit. 
*/ + if (!(dev->features & GBIT_SUPPORT)) + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + } + return ret; } EXPORT_SYMBOL(ksz9477_switch_register); diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 7d050fab08892123c1a255b92a0acea680c1383f..7951f52d860d36812067a96923175ed31b6ca846 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id); static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9477" }, { .compatible = "microchip,ksz9897" }, + { .compatible = "microchip,ksz9893" }, { .compatible = "microchip,ksz9567" }, {}, }; diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index fd1d6676ae4fdaced0ec3989ae381ab06983e8e1..7b6c0dce7536007614e09c99bfebbe70eee488a4 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -358,8 +358,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); - if (dev->dev_ops->phy_setup) - dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index f2c9bb68fd3306669ee79a9f4f4ce1dc9f15ffb4..7d11dd32ec0d1a718f632de6cf86dfa5f91b12ef 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -119,8 +119,6 @@ struct ksz_dev_ops { u32 (*get_port_addr)(int port, int offset); void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); - void (*phy_setup)(struct ksz_device *dev, int port, - struct phy_device *phy); void (*port_cleanup)(struct ksz_device *dev, int port); void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7627ea61e0ea82d649c600773a660f3ff4edac5b..fee16c947c2e8ba723808944c59b1cbc79d22233 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -664,8 +664,11 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, const struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; + struct mv88e6xxx_port *p; int err; + p = &chip->ports[port]; + /* FIXME: is this the correct test? If we're in fixed mode on an * internal port, why should we process this any different from * PHY mode? On the other hand, the port may be automedia between @@ -675,10 +678,14 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, return; mv88e6xxx_reg_lock(chip); - /* FIXME: should we force the link down here - but if we do, how - * do we restore the link force/unforce state? The driver layering - * gets in the way. + /* In inband mode, the link may come up at any time while the link + * is not forced down. Force the link down while we reconfigure the + * interface mode. 
*/ + if (mode == MLO_AN_INBAND && p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); + err = mv88e6xxx_port_config_interface(chip, port, state->interface); if (err && err != -EOPNOTSUPP) goto err_unlock; @@ -691,6 +698,15 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, if (err > 0) err = 0; + /* Undo the forced down state above after completing configuration + * irrespective of its state on entry, which allows the link to come up. + */ + if (mode == MLO_AN_INBAND && p->interface != state->interface && + chip->info->ops->port_set_link) + chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); + + p->interface = state->interface; + err_unlock: mv88e6xxx_reg_unlock(chip); diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index e5430cf2ad711c2d2c6e23898f32b2bcd7903f32..6476524e8239d878210599ba803ea539ba8c4a50 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -232,6 +232,7 @@ struct mv88e6xxx_port { u64 atu_full_violation; u64 vtu_member_violation; u64 vtu_miss_violation; + phy_interface_t interface; u8 cmode; bool mirror_ingress; bool mirror_egress; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index ed5b465bc6640d00b29f1cc388874bbaf523d29f..992fedbe4ce3c24b07141c188452d7d3a2d81a09 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -64,6 +64,7 @@ struct aq_hw_caps_s { u8 rx_rings; bool flow_control; bool is_64_dma; + u32 quirks; u32 priv_data_len; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 4435c6374f7e052cb972c58b7cf7db8fab44bb9f..7c7bf6bf163f6167b1e29b6bc42bae64f27c7b91 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -415,6 +415,15 @@ int aq_nic_init(struct aq_nic_s *self) self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) { self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX; err = aq_phy_init(self->aq_hw); + + /* Disable the PTP on NICs where it's known to cause datapath + * problems. + * Ideally this should have been done by PHY provisioning, but + * many units have been shipped with enabled PTP block already. 
+ */ + if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP) + if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX) + aq_phy_disable_ptp(self->aq_hw); } for (i = 0U; i < self->aq_vecs; i++) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 2ab003065e6245379f40b9c50a922ed8e27fd921..439ce9692dac2edf98681c711b620141b7ce3c47 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -81,6 +81,8 @@ struct aq_nic_cfg_s { #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U #define AQ_NIC_FLAG_ERR_HW 0x80000000U +#define AQ_NIC_QUIRK_BAD_PTP BIT(0) + #define AQ_NIC_WOL_MODES (WAKE_MAGIC |\ WAKE_PHY) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c index 51ae921e3e1f4abc233e31e6a35325935de1a2dd..949ac23517017de56950236346dc84c68358f0fe 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c @@ -1,10 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only -/* aQuantia Corporation Network Driver - * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2018-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ #include "aq_phy.h" +#define HW_ATL_PTP_DISABLE_MSK BIT(10) + bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw) { int err = 0; @@ -145,3 +149,24 @@ bool aq_phy_init(struct aq_hw_s *aq_hw) return true; } + +void aq_phy_disable_ptp(struct aq_hw_s *aq_hw) +{ + static const u16 ptp_registers[] = { + 0x031e, + 0x031d, + 0x031c, + 0x031b, + }; + u16 val; + int i; + + for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) { + val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1, + ptp_registers[i]); + + aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1, + ptp_registers[i], + val & ~HW_ATL_PTP_DISABLE_MSK); + } +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h index 84b72ad04a4ab887ede2b9a8c7362281b57f9bb0..86cc1ee836e2def2bcc1188e63edf15731e38326 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* aQuantia Corporation Network Driver - * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2018-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. 
*/ #ifndef AQ_PHY_H @@ -29,4 +31,6 @@ bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw); bool aq_phy_init(struct aq_hw_s *aq_hw); +void aq_phy_disable_ptp(struct aq_hw_s *aq_hw); + #endif /* AQ_PHY_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 14d79f70cad779ecf924f052ef5b4d967d1eb518..2125bc20ab6a7a952c48943f3e46c13cfa8e255e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -93,6 +93,25 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { AQ_NIC_RATE_100M, }; +const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, + .quirks = AQ_NIC_QUIRK_BAD_PTP, +}; + +const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = { + DEFAULT_B0_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, + .quirks = AQ_NIC_QUIRK_BAD_PTP, +}; + static int hw_atl_b0_hw_reset(struct aq_hw_s *self) { int err = 0; @@ -354,8 +373,13 @@ static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) /* WSP, if min_rate is set for at least one TC. * RR otherwise. + * + * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't + * overwrite it here in that case. */ - hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + if (!nic_cfg->is_ptp) + hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, * leave Descriptor TC Arbiter as RR. */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index 30f468f2084d5ffb48a1a3eba53f6f4a8c81e8b6..16091af1798048f511c7fafe5bcec19b925f38df 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -18,17 +18,15 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108; extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109; - -#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108 -#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109 +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111; +extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112; #define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100 #define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107 #define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108 #define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109 - -#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108 -#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109 +#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111 +#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112 extern const struct aq_hw_ops hw_atl_ops_b0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 3c8e8047ea1ed0ca6cca95fbfa29b1c9953f9db0..d775b23025c16a8d74a18364d9f2d92f5a113d25 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1700,7 +1700,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) 
aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location + i), - ipv6_src[i]); + ipv6_src[3 - i]); } void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, @@ -1711,7 +1711,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, for (i = 0; i < 4; ++i) aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location + i), - ipv6_dest[i]); + ipv6_dest[3 - i]); } u32 hw_atl_sem_ram_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 06220792daf152fdfb0bb97b89f6f81704193c2f..7430ff025134129d0c1fefe8d7655c05acb72b7e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -1360,7 +1360,7 @@ */ /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */ -#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4) +#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4) /* Bitmask for bitfield l3_da0[1F:0] */ #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu /* Inverted bitmask for bitfield l3_da0[1F:0] */ diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 112edbd308230ee3c7f4082ef1e67f2edbbe9c97..38cce66ef212a5e7aadde101172478b59b61bb23 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -556,7 +556,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag) ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio"); if (IS_ERR(ag->mdio_reset)) { netif_err(ag, probe, ndev, "Failed to get reset mdio.\n"); - return PTR_ERR(ag->mdio_reset); + err = PTR_ERR(ag->mdio_reset); + goto mdio_err_put_clk; } mii_bus->name = "ag71xx_mdio"; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a884df44612aa4b0779e7d9747990b48e71d563..7463a1847cebd6135059df2027a8c004249ddc9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp) */ void bnxt_set_ring_params(struct bnxt *bp) { - u32 ring_size, rx_size, rx_space; + u32 ring_size, rx_size, rx_space, max_rx_cmpl; u32 agg_factor = 0, agg_ring_size = 0; /* 8 for CRC and VLAN */ @@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; - ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; + max_rx_cmpl = bp->rx_ring_size; + /* MAX TPA needs to be added because TPA_START completions are + * immediately recycled, so the TPA completions are not bound by + * the RX ring size. 
+ */ + if (bp->flags & BNXT_FLAG_TPA) + max_rx_cmpl += bp->max_tpa; + /* RX and TPA completions are 32-byte, all others are 16-byte */ + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; bp->cp_ring_size = ring_size; bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); @@ -10385,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work) &bp->sp_event)) bnxt_hwrm_phy_qcaps(bp); - if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, - &bp->sp_event)) - bnxt_init_ethtool_link_settings(bp); - rc = bnxt_update_link(bp, true); - mutex_unlock(&bp->link_lock); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); + + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, + &bp->sp_event)) + bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); } if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { int rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6b88143af5ea19e5c021cf6442d7cc77bd6fb4bc..b4aa56dc4f9fb4c61ac7dd3f0a9fb0d3e23be2b7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) + if (netif_running(dev)) { + mutex_lock(&bp->link_lock); rc = bnxt_hwrm_set_pause(bp); + mutex_unlock(&bp->link_lock); + } return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 3a9a51f7063ae5b084aad84d5e17ac47d7aab466..392e32c7122a69d0e743e8a7828e1e1285392c25 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp) } } + bp->pf.active_vfs = 0; kfree(bp->pf.vf); bp->pf.vf = NULL; } @@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp) bnxt_free_vf_resources(bp); - bp->pf.active_vfs = 0; /* Reclaim all resources for the PF. 
*/ rtnl_lock(); bnxt_restore_pf_fw_resources(bp); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index af924a8b885fda8b1139c0a1ad215687581eda15..e471b14fc6e98c9c513162e2d828ccc9fa00845f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -543,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size) #define VALIDATE_MASK(x) \ bcmgenet_hfb_validate_mask(&(x), sizeof(x)) -static int bcmgenet_hfb_insert_data(u32 *f, int offset, - void *val, void *mask, size_t size) +static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index, + u32 offset, void *val, void *mask, + size_t size) { - int index; - u32 tmp; + u32 index, tmp; - index = offset / 2; - tmp = f[index]; + index = f_index * priv->hw_params->hfb_filter_size + offset / 2; + tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32)); while (size--) { if (offset++ & 1) { @@ -567,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset, tmp |= 0x10000; break; } - f[index++] = tmp; + bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32)); if (size) - tmp = f[index]; + tmp = bcmgenet_hfb_readl(priv, + index * sizeof(u32)); } else { tmp &= ~0xCFF00; tmp |= (*(unsigned char *)val++) << 8; @@ -585,44 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset, break; } if (!size) - f[index] = tmp; + bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32)); } } return 0; } -static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data, - u32 f_length, u32 rx_queue, int f_index) -{ - u32 base = f_index * priv->hw_params->hfb_filter_size; - int i; - - for (i = 0; i < f_length; i++) - bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32)); - - bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length); - bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue); -} - -static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, - struct bcmgenet_rxnfc_rule *rule) +static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, + struct bcmgenet_rxnfc_rule *rule) { struct ethtool_rx_flow_spec *fs = &rule->fs; - int err = 0, offset = 0, f_length = 0; + u32 offset = 0, f_length = 0, f; u8 val_8, mask_8; __be16 val_16; u16 mask_16; size_t size; - u32 *f_data; - - f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32), - GFP_KERNEL); - if (!f_data) - return -ENOMEM; + f = fs->location; if (fs->flow_type & FLOW_MAC_EXT) { - bcmgenet_hfb_insert_data(f_data, 0, + bcmgenet_hfb_insert_data(priv, f, 0, &fs->h_ext.h_dest, &fs->m_ext.h_dest, sizeof(fs->h_ext.h_dest)); } @@ -630,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, if (fs->flow_type & FLOW_EXT) { if (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci) { - bcmgenet_hfb_insert_data(f_data, 12, + bcmgenet_hfb_insert_data(priv, f, 12, &fs->h_ext.vlan_etype, &fs->m_ext.vlan_etype, sizeof(fs->h_ext.vlan_etype)); - bcmgenet_hfb_insert_data(f_data, 14, + bcmgenet_hfb_insert_data(priv, f, 14, &fs->h_ext.vlan_tci, &fs->m_ext.vlan_tci, sizeof(fs->h_ext.vlan_tci)); @@ -646,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { case ETHER_FLOW: f_length += DIV_ROUND_UP(ETH_HLEN, 2); - bcmgenet_hfb_insert_data(f_data, 0, + bcmgenet_hfb_insert_data(priv, f, 0, &fs->h_u.ether_spec.h_dest, &fs->m_u.ether_spec.h_dest, sizeof(fs->h_u.ether_spec.h_dest)); - bcmgenet_hfb_insert_data(f_data, 
ETH_ALEN, + bcmgenet_hfb_insert_data(priv, f, ETH_ALEN, &fs->h_u.ether_spec.h_source, &fs->m_u.ether_spec.h_source, sizeof(fs->h_u.ether_spec.h_source)); - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, &fs->h_u.ether_spec.h_proto, &fs->m_u.ether_spec.h_proto, sizeof(fs->h_u.ether_spec.h_proto)); @@ -664,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, /* Specify IP Ether Type */ val_16 = htons(ETH_P_IP); mask_16 = 0xFFFF; - bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset, + bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset, &val_16, &mask_16, sizeof(val_16)); - bcmgenet_hfb_insert_data(f_data, 15 + offset, + bcmgenet_hfb_insert_data(priv, f, 15 + offset, &fs->h_u.usr_ip4_spec.tos, &fs->m_u.usr_ip4_spec.tos, sizeof(fs->h_u.usr_ip4_spec.tos)); - bcmgenet_hfb_insert_data(f_data, 23 + offset, + bcmgenet_hfb_insert_data(priv, f, 23 + offset, &fs->h_u.usr_ip4_spec.proto, &fs->m_u.usr_ip4_spec.proto, sizeof(fs->h_u.usr_ip4_spec.proto)); - bcmgenet_hfb_insert_data(f_data, 26 + offset, + bcmgenet_hfb_insert_data(priv, f, 26 + offset, &fs->h_u.usr_ip4_spec.ip4src, &fs->m_u.usr_ip4_spec.ip4src, sizeof(fs->h_u.usr_ip4_spec.ip4src)); - bcmgenet_hfb_insert_data(f_data, 30 + offset, + bcmgenet_hfb_insert_data(priv, f, 30 + offset, &fs->h_u.usr_ip4_spec.ip4dst, &fs->m_u.usr_ip4_spec.ip4dst, sizeof(fs->h_u.usr_ip4_spec.ip4dst)); @@ -688,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, /* Only supports 20 byte IPv4 header */ val_8 = 0x45; mask_8 = 0xFF; - bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset, + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset, &val_8, &mask_8, sizeof(val_8)); size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes); - bcmgenet_hfb_insert_data(f_data, + bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + 20 + offset, &fs->h_u.usr_ip4_spec.l4_4_bytes, &fs->m_u.usr_ip4_spec.l4_4_bytes, @@ -701,34 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, break; } + bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { /* Ring 0 flows can be handled by the default Descriptor Ring * We'll map them to ring 0, but don't enable the filter */ - bcmgenet_hfb_set_filter(priv, f_data, f_length, 0, - fs->location); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); rule->state = BCMGENET_RXNFC_STATE_DISABLED; } else { /* Other Rx rings are direct mapped here */ - bcmgenet_hfb_set_filter(priv, f_data, f_length, - fs->ring_cookie, fs->location); - bcmgenet_hfb_enable_filter(priv, fs->location); + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, + fs->ring_cookie); + bcmgenet_hfb_enable_filter(priv, f); rule->state = BCMGENET_RXNFC_STATE_ENABLED; } - - kfree(f_data); - - return err; } /* bcmgenet_hfb_clear * * Clear Hardware Filter Block and disable all filtering. 
*/ +static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) +{ + u32 base, i; + + base = f_index * priv->hw_params->hfb_filter_size; + for (i = 0; i < priv->hw_params->hfb_filter_size; i++) + bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); +} + static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) { u32 i; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); @@ -740,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); - for (i = 0; i < priv->hw_params->hfb_filter_cnt * - priv->hw_params->hfb_filter_size; i++) - bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32)); + for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) + bcmgenet_hfb_clear_filter(priv, i); } static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) { int i; + INIT_LIST_HEAD(&priv->rxnfc_list); if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) return; - INIT_LIST_HEAD(&priv->rxnfc_list); for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; @@ -1437,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev, loc_rule = &priv->rxnfc_rules[cmd->fs.location]; if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) bcmgenet_hfb_disable_filter(priv, cmd->fs.location); - if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) + if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&loc_rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; memcpy(&loc_rule->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec)); - err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); - if (err) { - netdev_err(dev, "rxnfc: Could not install rule (%d)\n", - err); - return err; - } + bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule); list_add_tail(&loc_rule->list, &priv->rxnfc_list); @@ -1473,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev, if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) bcmgenet_hfb_disable_filter(priv, cmd->fs.location); - if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&rule->list); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + } rule->state = BCMGENET_RXNFC_STATE_UNUSED; memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); @@ -3999,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev) if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) - goto err; + goto err_clk_disable; /* Mii wait queue */ init_waitqueue_head(&priv->wq); @@ -4011,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev) if (IS_ERR(priv->clk_wol)) { dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n"); err = PTR_ERR(priv->clk_wol); - goto err; + goto err_clk_disable; } priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee"); if (IS_ERR(priv->clk_eee)) { dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n"); err = PTR_ERR(priv->clk_eee); - goto err; + goto err_clk_disable; } /* If this is an internal GPHY, power it on now, before UniMAC is @@ -4129,8 +4118,9 @@ static int bcmgenet_resume(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; 
unsigned long dma_ctrl; - u32 offset, reg; + u32 reg; int ret; if (!netif_running(dev)) @@ -4161,10 +4151,11 @@ static int bcmgenet_resume(struct device *d) bcmgenet_set_hw_addr(priv, dev->dev_addr); - offset = HFB_FLT_ENABLE_V3PLUS; - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset); - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32)); - bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL); + /* Restore hardware filters */ + bcmgenet_hfb_clear(priv); + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) + bcmgenet_hfb_create_rxnfc_filter(priv, rule); if (priv->internal_phy) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); @@ -4208,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); - u32 offset; if (!netif_running(dev)) return 0; @@ -4220,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d) if (!device_may_wakeup(d)) phy_suspend(dev->phydev); - /* Preserve filter state and disable filtering */ - priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - offset = HFB_FLT_ENABLE_V3PLUS; - priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset); - priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + /* Disable filtering */ bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); return 0; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index a12cb59298f4359b4189063965a575e12afff11e..f6ca01da141d4bc0d0db45a6eaf6ef531a655473 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -696,7 +696,6 @@ struct bcmgenet_priv { u32 wolopts; u8 sopass[SOPASS_MAX]; bool wol_active; - u32 hfb_en[3]; struct bcmgenet_mib_counters mib; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 4ea6a26b04f7011d417fd737c8032a20b3c61944..1c86eddb1b510c34142e79f0722c5a4cce120cd8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, priv->wol_active = 0; clk_disable_unprepare(priv->clk_wol); + priv->crc_fwd_en = 0; /* Disable Magic Packet Detection */ - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); - reg &= ~(MPD_EN | MPD_PW_EN); - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already reset so skip the rest */ + reg &= ~(MPD_EN | MPD_PW_EN); + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } /* Disable WAKE_FILTER Detection */ - reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); - bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + if (priv->wolopts & WAKE_FILTER) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return; /* already reset so skip the rest */ + reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } /* Disable CRC Forward */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~CMD_CRC_FWD; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - priv->crc_fwd_en = 0; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 52582e8ed90e537a199043488f56b2a14af26fe5..2213e6ab81512fbc833ccb2484c85243a4b6b7ea 
100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -2821,11 +2821,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct macb *bp = netdev_priv(netdev); - wol->supported = 0; - wol->wolopts = 0; - - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { phylink_ethtool_get_wol(bp->phylink, wol); + wol->supported |= WAKE_MAGIC; + + if (bp->wol & MACB_WOL_ENABLED) + wol->wolopts |= WAKE_MAGIC; + } } static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -2833,9 +2835,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) struct macb *bp = netdev_priv(netdev); int ret; + /* Pass the order to phylink layer */ ret = phylink_ethtool_set_wol(bp->phylink, wol); - if (!ret) - return 0; + /* Don't manage WoL on MAC if handled by the PHY + * or if there's a failure in talking to the PHY + */ + if (!ret || ret != -EOPNOTSUPP) + return ret; if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || (wol->wolopts & ~WAKE_MAGIC)) @@ -3730,7 +3736,7 @@ static int macb_init(struct platform_device *pdev) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { val = 0; - if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(bp->phy_interface)) val = GEM_BIT(RGMII); else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) @@ -4422,7 +4428,7 @@ static int macb_probe(struct platform_device *pdev) bp->wol = 0; if (of_get_property(np, "magic-packet", NULL)) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); spin_lock_init(&bp->lock); @@ -4598,10 +4604,10 @@ static int __maybe_unused macb_suspend(struct device *dev) bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); } - netif_carrier_off(netdev); if (bp->ptp_info) bp->ptp_info->ptp_remove(netdev); - pm_runtime_force_suspend(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_suspend(dev); return 0; } @@ -4616,7 +4622,8 @@ static int __maybe_unused macb_resume(struct device *dev) if (!netif_running(netdev)) return 0; - pm_runtime_force_resume(dev); + if (!device_may_wakeup(dev)) + pm_runtime_force_resume(dev); if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IDR, MACB_BIT(WOL)); @@ -4654,7 +4661,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_disable_unprepare(bp->tx_clk); clk_disable_unprepare(bp->hclk); clk_disable_unprepare(bp->pclk); @@ -4670,7 +4677,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(&bp->dev->dev))) { + if (!(device_may_wakeup(dev))) { clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 7a7f61a8cdf409f33774ec5311998d8a597d8949..d02d346629b36395e3a3370976c10873ff43f2c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1112,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family) struct in_addr 
*addr; addr = (struct in_addr *)ipmask; - if (ntohl(addr->s_addr) == 0xffffffff) + if (addr->s_addr == htonl(0xffffffff)) return true; } else if (family == AF_INET6) { struct in6_addr *addr6; addr6 = (struct in6_addr *)ipmask; - if (ntohl(addr6->s6_addr32[0]) == 0xffffffff && - ntohl(addr6->s6_addr32[1]) == 0xffffffff && - ntohl(addr6->s6_addr32[2]) == 0xffffffff && - ntohl(addr6->s6_addr32[3]) == 0xffffffff) + if (addr6->s6_addr32[0] == htonl(0xffffffff) && + addr6->s6_addr32[1] == htonl(0xffffffff) && + addr6->s6_addr32[2] == htonl(0xffffffff) && + addr6->s6_addr32[3] == htonl(0xffffffff)) return true; } return false; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 32a45dc51ed7966b912097a1bb83872698a686d8..92eee66cbc8459001e7017b4cdc1647e9c8ef088 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2938,6 +2938,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, txq_info = adap->sge.uld_txq_info[tx_uld_type]; if (unlikely(!txq_info)) { WARN_ON(true); + kfree_skb(skb); return NET_XMIT_DROP; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1aa6dc10dc0be6c31a6498f1a4b98a9788bf5c4c..ad522f822cc22b623b5966957bb2d0e1e092cf4b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3493,7 +3493,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, drv_fw = &fw_info->fw_hdr; /* Read the header of the firmware on the card */ - ret = -t4_read_flash(adap, FLASH_FW_START, + ret = t4_read_flash(adap, FLASH_FW_START, sizeof(*card_fw) / sizeof(uint32_t), (uint32_t *)card_fw, 1); if (ret == 0) { @@ -3522,8 +3522,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, should_install_fs_fw(adap, card_fw_usable, be32_to_cpu(fs_fw->fw_ver), be32_to_cpu(card_fw->fw_ver))) { - ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, - fw_size, 0); + ret = t4_fw_upgrade(adap, adap->mbox, fw_data, + fw_size, 0); if (ret != 0) { dev_err(adap->pdev_dev, "failed to install firmware: %d\n", ret); @@ -3554,7 +3554,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); - ret = EINVAL; + ret = -EINVAL; goto bye; } diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 8d13ea370db10543b8dbc33fe45fd4bf7b7fc2d8..66e67b24a887c572c5c94309c540df22fb7cd224 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); + clk_disable_unprepare(port->pclk); return PTR_ERR(port->reset); } reset_control_reset(port->reset); @@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) IRQF_SHARED, port_names[port->id], port); - if (ret) + if (ret) { + clk_disable_unprepare(port->pclk); return ret; + } ret = register_netdev(netdev); if (!ret) { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 2972244e6eb0d6745991a0ff45e1533cac0a159e..43570f4911ea1276275617b21572a2f1b08b4020 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ 
b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2938,7 +2938,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) DMA_BIT_MASK(40)); if (err) { netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); - return err; + goto free_netdev; } /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index f150cd454fa44929708a77b1b64efc8b6bf16639..0998ceb1a26ea829d98422f4fbcd86a4a8698938 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -3632,7 +3632,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); dpmac_dev = fsl_mc_get_endpoint(dpni_dev); - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) return 0; if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io)) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 96831f49925c088ce6bacd32f47515471bb67fbc..22105d09bc895330633cfa3137da7857caff535d 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data) /* disable interrupts */ enetc_wr_reg(v->rbier, 0); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); napi_schedule_irqoff(&v->napi); @@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget) /* enable interrupts */ enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); - for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), ENETC_TBIER_TXTIE); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 4fac57dbb3c8fdfa5ed6bed6f07dadfd5ac4e334..7a9675bd36e8bdb9b5ce31799937a74099190324 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -906,6 +906,7 @@ static int enetc_pf_probe(struct pci_dev *pdev, return 0; err_reg_netdev: + enetc_mdio_remove(pf); enetc_of_put_phy(priv); enetc_free_msix(priv); err_alloc_msix: diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index a6cdd5b61921bb3472f3e8b72faefffddcc2e9cf..832a2175636d69b7a29ecd560b8b8804fa4c759f 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -525,11 +525,6 @@ struct fec_enet_private { unsigned int total_tx_ring_size; unsigned int total_rx_ring_size; - unsigned long work_tx; - unsigned long work_rx; - unsigned long work_ts; - unsigned long work_mdio; - struct platform_device *pdev; int dev_id; @@ -595,6 +590,7 @@ struct fec_enet_private { void fec_ptp_init(struct platform_device *pdev, int irq_idx); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); +void fec_ptp_disable_hwts(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 
2d0d313ee7c5a193a805f858b9fcd83f98a4ebea..cc7fbfc093548edd277712455e4fc3d805e12a59 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define DRIVER_NAME "fec" -#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) - /* Pause frame feild and FIFO threshold */ #define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 @@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) fep = netdev_priv(ndev); - queue_id = FEC_ENET_GET_QUQUE(queue_id); - txq = fep->tx_queue[queue_id]; /* get next bdp of dirty_tx */ nq = netdev_get_tx_queue(ndev, queue_id); @@ -1298,8 +1294,13 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) ndev->stats.tx_bytes += skb->len; } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && - fep->bufdesc_ex) { + /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who + * are to time stamp the packet, so we still need to check time + * stamping enabled flag. + */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && + fep->hwts_tx_en) && + fep->bufdesc_ex) { struct skb_shared_hwtstamps shhwtstamps; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; @@ -1340,17 +1341,14 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) writel(0, txq->bd.reg_desc_active); } -static void -fec_enet_tx(struct net_device *ndev) +static void fec_enet_tx(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - u16 queue_id; - /* First process class A queue, then Class B and Best Effort queue */ - for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { - clear_bit(queue_id, &fep->work_tx); - fec_enet_tx_queue(ndev, queue_id); - } - return; + int i; + + /* Make sure that AVB queues are processed first. */ + for (i = fep->num_tx_queues - 1; i >= 0; i--) + fec_enet_tx_queue(ndev, i); } static int @@ -1426,7 +1424,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) #ifdef CONFIG_M532x flush_cache_all(); #endif - queue_id = FEC_ENET_GET_QUQUE(queue_id); rxq = fep->rx_queue[queue_id]; /* First, grab all of the stats for the incoming packet. @@ -1550,6 +1547,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) htons(ETH_P_8021Q), vlan_tag); + skb_record_rx_queue(skb, queue_id); napi_gro_receive(&fep->napi, skb); if (is_copybreak) { @@ -1595,48 +1593,30 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) return pkt_received; } -static int -fec_enet_rx(struct net_device *ndev, int budget) +static int fec_enet_rx(struct net_device *ndev, int budget) { - int pkt_received = 0; - u16 queue_id; struct fec_enet_private *fep = netdev_priv(ndev); + int i, done = 0; - for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { - int ret; + /* Make sure that AVB queues are processed first. 
*/ + for (i = fep->num_rx_queues - 1; i >= 0; i--) + done += fec_enet_rx_queue(ndev, budget - done, i); - ret = fec_enet_rx_queue(ndev, - budget - pkt_received, queue_id); - - if (ret < budget - pkt_received) - clear_bit(queue_id, &fep->work_rx); - - pkt_received += ret; - } - return pkt_received; + return done; } -static bool -fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +static bool fec_enet_collect_events(struct fec_enet_private *fep) { - if (int_events == 0) - return false; + uint int_events; + + int_events = readl(fep->hwp + FEC_IEVENT); - if (int_events & FEC_ENET_RXF_0) - fep->work_rx |= (1 << 2); - if (int_events & FEC_ENET_RXF_1) - fep->work_rx |= (1 << 0); - if (int_events & FEC_ENET_RXF_2) - fep->work_rx |= (1 << 1); + /* Don't clear MDIO events, we poll for those */ + int_events &= ~FEC_ENET_MII; - if (int_events & FEC_ENET_TXF_0) - fep->work_tx |= (1 << 2); - if (int_events & FEC_ENET_TXF_1) - fep->work_tx |= (1 << 0); - if (int_events & FEC_ENET_TXF_2) - fep->work_tx |= (1 << 1); + writel(int_events, fep->hwp + FEC_IEVENT); - return true; + return int_events != 0; } static irqreturn_t @@ -1644,18 +1624,9 @@ fec_enet_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct fec_enet_private *fep = netdev_priv(ndev); - uint int_events; irqreturn_t ret = IRQ_NONE; - int_events = readl(fep->hwp + FEC_IEVENT); - - /* Don't clear MDIO events, we poll for those */ - int_events &= ~FEC_ENET_MII; - - writel(int_events, fep->hwp + FEC_IEVENT); - fec_enet_collect_events(fep, int_events); - - if ((fep->work_tx || fep->work_rx) && fep->link) { + if (fec_enet_collect_events(fep) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { @@ -1672,17 +1643,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; struct fec_enet_private *fep = netdev_priv(ndev); - int pkts; + int done = 0; - pkts = fec_enet_rx(ndev, budget); - - fec_enet_tx(ndev); + do { + done += fec_enet_rx(ndev, budget - done); + fec_enet_tx(ndev); + } while ((done < budget) && fec_enet_collect_events(fep)); - if (pkts < budget) { - napi_complete_done(napi, pkts); + if (done < budget) { + napi_complete_done(napi, done); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } - return pkts; + + return done; } /* ------------------------------------------------------------------------- */ @@ -2755,10 +2728,16 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) return -ENODEV; if (fep->bufdesc_ex) { - if (cmd == SIOCSHWTSTAMP) - return fec_ptp_set(ndev, rq); - if (cmd == SIOCGHWTSTAMP) - return fec_ptp_get(ndev, rq); + bool use_fec_hwts = !phy_has_hwtstamp(phydev); + + if (cmd == SIOCSHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_set(ndev, rq); + fec_ptp_disable_hwts(ndev); + } else if (cmd == SIOCGHWTSTAMP) { + if (use_fec_hwts) + return fec_ptp_get(ndev, rq); + } } return phy_mii_ioctl(phydev, rq, cmd); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 945643c026155f83fcb38ecfac40d7ef37c2d888..f8a592c96beb06e04145c334224e5ab8b1422060 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -452,6 +452,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } +/** + * fec_ptp_disable_hwts - disable hardware time stamping + * @ndev: pointer to net_device + */ +void fec_ptp_disable_hwts(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + 
fep->hwts_tx_en = 0; + fep->hwts_rx_en = 0; +} + int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) { struct fec_enet_private *fep = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index b3c69e9038eac4bab916c0d1a4839cacc2eb6f4f..b513b8c5c3b5e5be7027e36acad20f6a15b9c991 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -779,8 +779,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) mac_addr = of_get_mac_address(np); - if (!IS_ERR(mac_addr)) + if (!IS_ERR(mac_addr)) { ether_addr_copy(dev->dev_addr, mac_addr); + } else { + eth_hw_addr_random(dev); + dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr); + } if (model && !strcasecmp(model, "TSEC")) priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT | diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index d041cac9a487a842b59acd3c49034c5e7b2246c7..088550db2de78e056255c5eb820f835b788431a2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -77,6 +77,7 @@ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) enum hns_desc_type { + DESC_TYPE_UNKNOWN, DESC_TYPE_SKB, DESC_TYPE_FRAGLIST_SKB, DESC_TYPE_PAGE, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b14f2abc242501353915ee84784f7a90941d8366..71ed4c54f6d5dffa4964aab211f8feacb5ddbd4d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, int k, sizeoflast; dma_addr_t dma; - if (type == DESC_TYPE_SKB) { - struct sk_buff *skb = (struct sk_buff *)priv; - int ret; - - ret = hns3_fill_skb_desc(ring, skb, desc); - if (unlikely(ret < 0)) - return ret; - - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - } else if (type == DESC_TYPE_FRAGLIST_SKB) { + if (type == DESC_TYPE_FRAGLIST_SKB || + type == DESC_TYPE_SKB) { struct sk_buff *skb = (struct sk_buff *)priv; dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); @@ -1118,12 +1110,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return -ENOMEM; } + desc_cb->priv = priv; desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; if (likely(size <= HNS3_MAX_BD_SIZE)) { - desc_cb->priv = priv; - desc_cb->dma = dma; - desc_cb->type = type; desc->addr = cpu_to_le64(dma); desc->tx.send_size = cpu_to_le16(size); desc->tx.bdtp_fe_sc_vld_ra_ri = @@ -1135,18 +1127,11 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, } frag_buf_num = hns3_tx_bd_count(size); - sizeoflast = size & HNS3_TX_LAST_SIZE_M; + sizeoflast = size % HNS3_MAX_BD_SIZE; sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; - desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB || - type == DESC_TYPE_SKB) && !k) ? - type : DESC_TYPE_PAGE; - /* now, fill the descriptor */ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? 
@@ -1158,7 +1143,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, /* move ring pointer to next */ ring_ptr_move_fw(ring, next_to_use); - desc_cb = &ring->desc_cb[ring->next_to_use]; desc = &ring->desc[ring->next_to_use]; } @@ -1346,6 +1330,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) unsigned int i; for (i = 0; i < ring->desc_num; i++) { + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + + memset(desc, 0, sizeof(*desc)); + /* check if this is where we started */ if (ring->next_to_use == next_to_use_orig) break; @@ -1353,6 +1341,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) /* rollback one */ ring_ptr_move_bw(ring, next_to_use); + if (!ring->desc_cb[ring->next_to_use].dma) + continue; + /* unmap the descriptor dma address */ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB || ring->desc_cb[ring->next_to_use].type == @@ -1369,6 +1360,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) ring->desc_cb[ring->next_to_use].length = 0; ring->desc_cb[ring->next_to_use].dma = 0; + ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN; } } @@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; + ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]); + if (unlikely(ret < 0)) + goto fill_err; + ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); if (unlikely(ret < 0)) goto fill_err; @@ -4127,9 +4123,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); - hns3_dbg_uninit(handle); - out_netdev_free: + hns3_dbg_uninit(handle); free_netdev(netdev); } @@ -4141,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) return; if (linkup) { - netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + netif_carrier_on(netdev); if (netif_msg_link(handle)) netdev_info(netdev, "link up\n"); } else { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 66cd4395f78146d3aa56dd90b6afede5cedd6c39..a8776620acbc6c1f5f83fbd8880e03d780380f32 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -165,8 +165,6 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) -#define HNS3_TX_LAST_SIZE_M 0xffff - #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6b1545f982aad5b7fd7059096426b4c63a395ae4..2622e04e8eedaf7130450516687b69ad28b93f22 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -180,18 +180,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, { struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; unsigned char *packet = skb->data; + u32 len = skb_headlen(skb); u32 i; - for (i = 0; i < skb->len; i++) + len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE); + + for (i = 0; i < len; i++) if (packet[i] != (unsigned char)(i & 0xff)) break; /* The packet is correctly received */ - if (i == skb->len) + if (i == HNS3_NIC_LB_TEST_PACKET_SIZE) tqp_vector->rx_group.total_packets++; else print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); + 
skb->data, len, true); dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 96bfad52630d3d29fd2c13c62d6f727a1210ef22..36575e72a915e86de79dd0495b3b3854f89825fa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2673,11 +2673,10 @@ void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) delay_time); } -static int hclge_get_mac_link_status(struct hclge_dev *hdev) +static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) { struct hclge_link_status_cmd *req; struct hclge_desc desc; - int link_status; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); @@ -2689,33 +2688,25 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev) } req = (struct hclge_link_status_cmd *)desc.data; - link_status = req->status & HCLGE_LINK_STATUS_UP_M; + *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ? + HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; - return !!link_status; + return 0; } -static int hclge_get_mac_phy_link(struct hclge_dev *hdev) +static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) { - unsigned int mac_state; - int link_stat; + struct phy_device *phydev = hdev->hw.mac.phydev; + + *link_status = HCLGE_LINK_STATUS_DOWN; if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) return 0; - mac_state = hclge_get_mac_link_status(hdev); - - if (hdev->hw.mac.phydev) { - if (hdev->hw.mac.phydev->state == PHY_RUNNING) - link_stat = mac_state & - hdev->hw.mac.phydev->link; - else - link_stat = 0; - - } else { - link_stat = mac_state; - } + if (phydev && (phydev->state != PHY_RUNNING || !phydev->link)) + return 0; - return !!link_stat; + return hclge_get_mac_link_status(hdev, link_status); } static void hclge_update_link_status(struct hclge_dev *hdev) @@ -2725,6 +2716,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev) struct hnae3_handle *rhandle; struct hnae3_handle *handle; int state; + int ret; int i; if (!client) @@ -2733,7 +2725,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev) if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) return; - state = hclge_get_mac_phy_link(hdev); + ret = hclge_get_mac_phy_link(hdev, &state); + if (ret) { + clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); + return; + } + if (state != hdev->hw.mac.link) { for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; @@ -5809,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, /* to avoid rule conflict, when user configure rule by ethtool, * we need to clear all arfs rules */ + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); - spin_lock_bh(&hdev->fd_rule_lock); ret = hclge_fd_config_rule(hdev, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -5854,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, return ret; } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, bool clear_list) { @@ -5866,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return; - spin_lock_bh(&hdev->fd_rule_lock); for_each_set_bit(location, hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, @@ -5883,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct 
hnae3_handle *handle, bitmap_zero(hdev->fd_bmap, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); } - - spin_unlock_bh(&hdev->fd_rule_lock); } static int hclge_restore_fd_entries(struct hnae3_handle *handle) @@ -6266,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, u16 flow_id, struct flow_keys *fkeys) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_fd_rule_tuples new_tuples; + struct hclge_fd_rule_tuples new_tuples = {}; struct hclge_dev *hdev = vport->back; struct hclge_fd_rule *rule; u16 tmp_queue_id; @@ -6276,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; - memset(&new_tuples, 0, sizeof(new_tuples)); - hclge_fd_get_flow_tuples(fkeys, &new_tuples); - - spin_lock_bh(&hdev->fd_rule_lock); - /* when there is already fd rule existed add by user, * arfs should not work */ + spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { spin_unlock_bh(&hdev->fd_rule_lock); return -EOPNOTSUPP; } + hclge_fd_get_flow_tuples(fkeys, &new_tuples); + /* check is there flow director filter existed for this flow, * if not, create a new filter for it; * if filter exist with different queue id, modify the filter; @@ -6371,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) #endif } +/* make sure being called after lock up with fd_rule_lock */ static void hclge_clear_arfs_rules(struct hnae3_handle *handle) { #ifdef CONFIG_RFS_ACCEL @@ -6423,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) hdev->fd_en = enable; clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; - if (!enable) + + if (!enable) { + spin_lock_bh(&hdev->fd_rule_lock); hclge_del_all_fd_entries(handle, clear); - else + spin_unlock_bh(&hdev->fd_rule_lock); + } else { hclge_restore_fd_entries(handle); + } } static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) @@ -6524,14 +6522,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) { #define HCLGE_MAC_LINK_STATUS_NUM 100 + int link_status; int i = 0; int ret; do { - ret = hclge_get_mac_link_status(hdev); - if (ret < 0) + ret = hclge_get_mac_link_status(hdev, &link_status); + if (ret) return ret; - else if (ret == link_ret) + if (link_status == link_ret) return 0; msleep(HCLGE_LINK_STATUS_MS); @@ -6542,9 +6541,6 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, bool is_phy) { -#define HCLGE_LINK_STATUS_DOWN 0 -#define HCLGE_LINK_STATUS_UP 1 - int link_ret; link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; @@ -6891,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle) int i; set_bit(HCLGE_STATE_DOWN, &hdev->state); - + spin_lock_bh(&hdev->fd_rule_lock); hclge_clear_arfs_rules(handle); + spin_unlock_bh(&hdev->fd_rule_lock); /* If it is not PF reset, the firmware will disable the MAC, * so it only need to stop phy here. @@ -9045,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, bool writen_to_tbl = false; int ret = 0; - /* When device is resetting, firmware is unable to handle - * mailbox. Just record the vlan id, and remove it after + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after * reset finished. 
*/ - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) { + if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, vport->vlan_del_fail_bmap); return -EBUSY; } @@ -9859,7 +9857,7 @@ static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); hdev->reset_type = HNAE3_FLR_RESET; ret = hclge_reset_prepare(hdev); - if (ret) { + if (ret || hdev->reset_pending) { dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n", ret); if (hdev->reset_pending || diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 46e6e0fef3ba5a7a591b021b5efb2fd312ad808f..9bbdd4557c271c620179a9f539db94d37fd689ec 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -317,6 +317,9 @@ enum hclge_link_fail_code { HCLGE_LF_XSFP_ABSENT, }; +#define HCLGE_LINK_STATUS_DOWN 0 +#define HCLGE_LINK_STATUS_UP 1 + #define HCLGE_PG_NUM 4 #define HCLGE_SCH_MODE_SP 0 #define HCLGE_SCH_MODE_DWRR 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 1b9578d0bd800b91d622f9b4620d54d92f7eec1a..9162856de1b19c219934fd82da58ba3be976cc2f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - /* When device is resetting, firmware is unable to handle - * mailbox. Just record the vlan id, and remove it after + /* When device is resetting or reset failed, firmware is unable to + * handle mailbox. Just record the vlan id, and remove it after * reset finished. 
*/ - if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) { + if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { set_bit(vlan_id, hdev->vlan_del_fail_bmap); return -EBUSY; } @@ -1793,6 +1794,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to assert VF reset, ret = %d\n", ret); + return ret; + } hdev->rst_stats.vf_func_rst_cnt++; } @@ -3434,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, { struct hnae3_handle *nic = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; + int ret; rtnl_lock(); - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); - rtnl_unlock(); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { + dev_warn(&hdev->pdev->dev, + "is resetting when updating port based vlan info\n"); + rtnl_unlock(); + return; + } + + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return; + } /* send msg to PF and wait update port based vlan info */ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_PORT_BASE_VLAN_CFG); memcpy(send_msg.data, port_base_vlan_info, data_size); - hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); - - if (state == HNAE3_PORT_BASE_VLAN_DISABLE) - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; - else - nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (!ret) { + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = state; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + } - rtnl_lock(); hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); rtnl_unlock(); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 0245da02efbb0952b326e436a053fbcbd1a0f765..b735bc537508fcda0526238f7de5082d798598ff 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -814,6 +814,8 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) err_init_msix: err_pfhwdev_alloc: hinic_free_hwif(hwif); + if (err > 0) + err = -EIO; return ERR_PTR(err); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index c33eb114705572afc3adf27b17b340eeebf10314..e0f5a81d8620d7091ccd5b82b269ea899e9ed89a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -370,48 +370,89 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, MSG_NOT_RESP, timeout); } -/** - * mgmt_recv_msg_handler - handler for message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, - struct hinic_recv_msg *recv_msg) +static void recv_mgmt_msg_work_handler(struct work_struct *work) { - struct hinic_hwif *hwif = pf_to_mgmt->hwif; - struct pci_dev *pdev = hwif->pdev; - u8 *buf_out = recv_msg->buf_out; + struct hinic_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct hinic_mgmt_msg_handle_work, work); + struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt; + struct 
pci_dev *pdev = pf_to_mgmt->hwif->pdev; + u8 *buf_out = pf_to_mgmt->mgmt_ack_buf; struct hinic_mgmt_cb *mgmt_cb; unsigned long cb_state; u16 out_size = 0; - if (recv_msg->mod >= HINIC_MOD_MAX) { + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mgmt_work->mod >= HINIC_MOD_MAX) { dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", - recv_msg->mod); + mgmt_work->mod); + kfree(mgmt_work->msg); + kfree(mgmt_work); return; } - mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; + mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod]; cb_state = cmpxchg(&mgmt_cb->state, HINIC_MGMT_CB_ENABLED, HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) - mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, - recv_msg->msg, recv_msg->msg_len, + mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd, + mgmt_work->msg, mgmt_work->msg_len, buf_out, &out_size); else dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n", - recv_msg->mod, recv_msg->cmd); + mgmt_work->mod, mgmt_work->cmd); mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; - if (!recv_msg->async_mgmt_to_pf) + if (!mgmt_work->async_mgmt_to_pf) /* MGMT sent sync msg, send the response */ - msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, + msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd, buf_out, out_size, MGMT_RESP, - recv_msg->msg_id); + mgmt_work->msg_id); + + kfree(mgmt_work->msg); + kfree(mgmt_work); +} + +/** + * mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + struct hinic_mgmt_msg_handle_work *mgmt_work = NULL; + struct pci_dev *pdev = pf_to_mgmt->hwif->pdev; + + mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); + if (!mgmt_work) { + dev_err(&pdev->dev, "Allocate mgmt work memory failed\n"); + return; + } + + if (recv_msg->msg_len) { + mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); + if (!mgmt_work->msg) { + dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n"); + kfree(mgmt_work); + return; + } + } + + mgmt_work->pf_to_mgmt = pf_to_mgmt; + mgmt_work->msg_len = recv_msg->msg_len; + memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); + mgmt_work->msg_id = recv_msg->msg_id; + mgmt_work->mod = recv_msg->mod; + mgmt_work->cmd = recv_msg->cmd; + mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; + + INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); + queue_work(pf_to_mgmt->workq, &mgmt_work->work); } /** @@ -546,6 +587,12 @@ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) if (!pf_to_mgmt->sync_msg_buf) return -ENOMEM; + pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev, + MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!pf_to_mgmt->mgmt_ack_buf) + return -ENOMEM; + return 0; } @@ -571,6 +618,11 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, return 0; sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt"); + if (!pf_to_mgmt->workq) { + dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n"); + return -ENOMEM; + } pf_to_mgmt->sync_msg_id = 0; err = alloc_msg_buf(pf_to_mgmt); @@ -605,4 +657,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); hinic_api_cmd_free(pf_to_mgmt->cmd_chain); + destroy_workqueue(pf_to_mgmt->workq); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h 
b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index c2b142c08b0e2f9c119095f544f44dfe421d313c..a824fbda59dbe9f0f55bd92d0b024ff0eb872dbc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -119,6 +119,7 @@ struct hinic_pf_to_mgmt { struct semaphore sync_msg_lock; u16 sync_msg_id; u8 *sync_msg_buf; + void *mgmt_ack_buf; struct hinic_recv_msg recv_resp_msg_from_mgmt; struct hinic_recv_msg recv_msg_from_mgmt; @@ -126,6 +127,21 @@ struct hinic_pf_to_mgmt { struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; + + struct workqueue_struct *workq; +}; + +struct hinic_mgmt_msg_handle_work { + struct work_struct work; + struct hinic_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + + enum hinic_mod_type mod; + u8 cmd; + u16 msg_id; + int async_mgmt_to_pf; }; void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0fd7eae25fe9d6f211549edf8eee0867aa7cfacf..5afb3c9c52d20a44d1cef41b0b1465e51269e9fe 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3206,7 +3206,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) req_tx_irq_failed: for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); - irq_dispose_mapping(adapter->rx_scrq[j]->irq); + irq_dispose_mapping(adapter->tx_scrq[j]->irq); } release_sub_crqs(adapter, 1); return rc; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index f999cca37a8ab7de5ab931dd7db114755400713a..489bb5b594759d49ed8be0bd4d4e4074170c973e 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; ret_val = e1000_disable_ulp_lpt_lp(hw, true); - if (ret_val) { + if (ret_val) e_warn("Failed to disable ULP\n"); - goto out; - } ret_val = hw->phy.ops.acquire(hw); if (ret_val) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8bb3db2cbd417f908a07eb3628372ca2ab539f53..6e5861bfb0fa9560aa79837efc8130ad33070a57 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c639e3a293024fe48d685e129699de993c7cd098..7d5d9d34f4e470ceadc201f44caea0b6509fc1b4 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3959,7 +3959,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, /* When at 2.5G, the link partner can send frames with shortened * preambles. 
*/ - if (state->speed == SPEED_2500) + if (state->interface == PHY_INTERFACE_MODE_2500BASEX) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; if (pp->phy_interface != state->interface) { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 64786568af0dbf199976d4067a4ac6f329729a98..75a8c407e815c08c0a031f190c8bde7b1892fb50 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work) if (!netif_running(pf->netdev)) return; + rtnl_lock(); otx2_stop(pf->netdev); pf->reset_count++; otx2_open(pf->netdev); netif_trans_update(pf->netdev); + rtnl_unlock(); } static const struct net_device_ops otx2_netdev_ops = { @@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev) pf = netdev_priv(netdev); + cancel_work_sync(&pf->reset_task); /* Disable link notifications */ otx2_cgx_config_linkevents(pf, false); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index f4227517dc8e03b28753a50c5fdb19961b8624ba..92a3db69a6cd65f2526ed93977924429498805b1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev) vf = netdev_priv(netdev); + cancel_work_sync(&vf->reset_task); + unregister_netdev(netdev); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 241f007169797b8a21e4496610b700698bd1d8c1..fe54764caea9c7d6253de215fb198c2ab5333b40 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -203,7 +203,7 @@ static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val) static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg) { - u16 v; + u16 v = 0; __gm_phy_read(hw, port, reg, &v); return v; } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f6a1f8666f950e31b539433804dfed54735fd7eb..a1c45b39a2301a38f8e3d8614c76e4637f9d36d4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, return 0; } -static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) +static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface, int speed) { u32 val; int ret; + if (interface == PHY_INTERFACE_MODE_TRGMII) { + mtk_w32(eth, TRGMII_MODE, INTF_MODE); + val = 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + return; + } + val = (speed == SPEED_1000) ? 
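The igb and octeontx2 hunks above share one pattern: a deferred reset worker has to run under the RTNL lock around its stop/open sequence and bail out early if the interface is already going down, and the remove path has to cancel_work_sync() the worker before tearing the device apart. A simplified sketch with a hypothetical adapter structure (the real drivers call their own stop/open helpers rather than dev_close()/dev_open()):

#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct my_adapter {
        struct net_device *netdev;
        struct work_struct reset_task;
        unsigned long state;
};

#define MY_DOWN         0
#define MY_RESETTING    1

static void my_reset_task(struct work_struct *work)
{
        struct my_adapter *adapter =
                container_of(work, struct my_adapter, reset_task);

        rtnl_lock();
        if (test_bit(MY_DOWN, &adapter->state) ||
            test_bit(MY_RESETTING, &adapter->state)) {
                rtnl_unlock();
                return;
        }
        dev_close(adapter->netdev);             /* stand-in for driver stop */
        dev_open(adapter->netdev, NULL);        /* stand-in for driver open */
        rtnl_unlock();
}

static void my_remove(struct my_adapter *adapter)
{
        /* Never let a reset race with teardown. */
        cancel_work_sync(&adapter->reset_task);
        /* ...rest of the teardown... */
}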
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; mtk_w32(eth, val, INTF_MODE); @@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, state->interface)) goto err_phy; } else { - if (state->interface != - PHY_INTERFACE_MODE_TRGMII) - mtk_gmac0_rgmii_adjust(mac->hw, - state->speed); + mtk_gmac0_rgmii_adjust(mac->hw, + state->interface, + state->speed); /* mt7623_pad_clk_setup */ for (i = 0 ; i < NUM_TRGMII_CTRL; i++) @@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->irq = eth->irq[0]; eth->netdev[id]->dev.of_node = np; + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + return 0; free_netdev: diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 3d9aa7da95e95c1e1b76b45a6f42130f43c4d9af..2d3e45780719c2c21217a77d9bb7d2ee7cd6653e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4356,12 +4356,14 @@ static void mlx4_pci_resume(struct pci_dev *pdev) static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h index 7be6b2d36b60382041321aa741fff70e2fd9e524..9976de8b90478d5935e59e2a6ac4c6ce2b725570 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h @@ -29,6 +29,7 @@ struct mlx5e_dcbx { bool manual_buffer; u32 cable_len; u32 xoff; + u16 port_buff_cell_sz; }; #define MLX5E_MAX_DSCP (64) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 2a8950b3056f95445bc335835c0d694fa7d01011..3cf3e35053f7768de5010527139d1de02200499c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_400GAUI_8] = 400000, }; +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev) +{ + struct mlx5e_port_eth_proto eproto; + int err; + + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet)) + return true; + + err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto); + if (err) + return false; + + return !!eproto.cap; +} + static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, const u32 **arr, u32 *size, bool force_legacy) { - bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev); *size = ext ? 
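The mtk_eth_soc change folds the TRGMII case into mtk_gmac0_rgmii_adjust(): for a TRGMII link it writes the TRGMII interface mode and retunes the TRGPLL to a fixed 500 MHz instead of falling through to the RGMII speed handling. Reduced to the clock part only (the register writes are driver specific and elided):

#include <linux/clk.h>
#include <linux/phy.h>

static int set_gmac0_pll(struct clk *trgmii_pll, phy_interface_t interface)
{
        /* TRGMII wants a fixed 500 MHz PLL, independent of link speed. */
        if (interface == PHY_INTERFACE_MODE_TRGMII)
                return clk_set_rate(trgmii_pll, 500000000);

        /* RGMII timing is chosen via the MAC interface-mode register. */
        return 0;
}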
ARRAY_SIZE(mlx5e_ext_link_speed) : ARRAY_SIZE(mlx5e_link_speed); @@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) bool ext; int err; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) goto out; @@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) int err; int i; - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = mlx5e_ptys_ext_supported(mdev); err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index a2ddd446dd59e66911bb3f55ee0941c855c66c7c..7a7defe6079262678e1ff234069a87487a48d53b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, bool force_legacy); - +bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index ae99fac08b53260f38ba34a869d38e87614b3ec4..673f1c82d38155b4c71fa7cbae95f8c5bdbafae8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -34,6 +34,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); u32 total_used = 0; @@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, port_buffer->buffer[i].epsb = MLX5_GET(bufferx_reg, buffer, epsb); port_buffer->buffer[i].size = - MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz; port_buffer->buffer[i].xon = - MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz; port_buffer->buffer[i].xoff = - MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz; total_used += port_buffer->buffer[i].size; mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i, @@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, } port_buffer->port_buffer_size = - MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT; + MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; port_buffer->spare_buffer_size = port_buffer->port_buffer_size - total_used; @@ -88,9 +89,9 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, static int port_set_buffer(struct mlx5e_priv *priv, struct mlx5e_port_buffer *port_buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5_core_dev *mdev = priv->mdev; int sz = MLX5_ST_SZ_BYTES(pbmc_reg); - void *buffer; void *in; int err; int i; @@ -104,16 +105,18 @@ static int 
port_set_buffer(struct mlx5e_priv *priv, goto out; for (i = 0; i < MLX5E_MAX_BUFFER; i++) { - buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); - - MLX5_SET(bufferx_reg, buffer, size, - port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT); - MLX5_SET(bufferx_reg, buffer, lossy, - port_buffer->buffer[i].lossy); - MLX5_SET(bufferx_reg, buffer, xoff_threshold, - port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT); - MLX5_SET(bufferx_reg, buffer, xon_threshold, - port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT); + void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); + u64 size = port_buffer->buffer[i].size; + u64 xoff = port_buffer->buffer[i].xoff; + u64 xon = port_buffer->buffer[i].xon; + + do_div(size, port_buff_cell_sz); + do_div(xoff, port_buff_cell_sz); + do_div(xon, port_buff_cell_sz); + MLX5_SET(bufferx_reg, buffer, size, size); + MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy); + MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff); + MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); } err = mlx5e_port_set_pbmc(mdev, in); @@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) } static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, - u32 xoff, unsigned int max_mtu) + u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz) { int i; @@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, } if (port_buffer->buffer[i].size < - (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) { + (xoff + max_mtu + port_buff_cell_sz)) { pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n", i, port_buffer->buffer[i].size); return -ENOMEM; @@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * @pfc_en: current pfc configuration * @buffer: current prio to buffer mapping * @xoff: xoff value + * @port_buff_cell_sz: port buffer cell_size * @port_buffer: port receive buffer configuration * @change: * @@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, * sets change to true if buffer configuration was modified. 
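The port_buffer rework above drops the hard-coded 128-byte cell (the old shift by MLX5E_BUFFER_CELL_SHIFT = 7) in favour of a cell size queried from the device, so byte values are converted to cells by dividing by that size (do_div() in-kernel, since the byte counts are 64-bit). The arithmetic itself is plain integer math and can be sanity-checked in isolation:

#include <stdint.h>
#include <stdio.h>

static uint32_t bytes_to_cells(uint64_t bytes, uint16_t cell_sz)
{
        return (uint32_t)(bytes / cell_sz);
}

static uint64_t cells_to_bytes(uint32_t cells, uint16_t cell_sz)
{
        return (uint64_t)cells * cell_sz;
}

int main(void)
{
        uint64_t xoff = 98304;  /* example threshold, in bytes */

        /* Same byte value, different cell counts depending on the device. */
        printf("128B cells: %u\n", bytes_to_cells(xoff, 128));  /* 768 */
        printf("64B  cells: %u\n", bytes_to_cells(xoff, 64));   /* 1536 */
        printf("round trip: %llu\n",
               (unsigned long long)cells_to_bytes(768, 128));   /* 98304 */
        return 0;
}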
*/ static int update_buffer_lossy(unsigned int max_mtu, - u8 pfc_en, u8 *buffer, u32 xoff, + u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz, struct mlx5e_port_buffer *port_buffer, bool *change) { @@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu, } if (changed) { - err = update_xoff_threshold(port_buffer, xoff, max_mtu); + err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; @@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, u32 *buffer_size, u8 *prio2buffer) { + u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; struct mlx5e_port_buffer port_buffer; u32 xoff = calculate_xoff(priv, mtu); bool update_prio2buffer = false; @@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } @@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, + err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz, &port_buffer, &update_buffer); if (err) return err; @@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, if (err) return err; - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, xoff, &port_buffer, &update_buffer); if (err) return err; @@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, return -EINVAL; update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } @@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, /* Need to update buffer configuration if xoff value is changed */ if (!update_buffer && xoff != priv->dcbx.xoff) { update_buffer = true; - err = update_xoff_threshold(&port_buffer, xoff, max_mtu); + err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index 34f55b81a0debf8d11a80c3a877f665f902d2e7e..80af7a5ac6046acb9073faa49cc89225c63d305c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -36,7 +36,6 @@ #include "port.h" #define MLX5E_MAX_BUFFER 8 -#define MLX5E_BUFFER_CELL_SHIFT 7 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c index bdb71332cbf2d7936f5fc3c2b200cb5e5f09cfee..3e44e4d820c5131f664369df8ba219244f86f452 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *priv; /* A given netdev is not a representor 
or not a slave of LAG configuration */ if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev)) return false; + priv = netdev_priv(netdev); + rpriv = priv->ppriv; + /* Egress acl forward to vport is supported only non-uplink representor */ return rpriv->rep->vport != MLX5_VPORT_UPLINK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c index baa162432e75e31c83d5d2f130782c93c4aab669..c3d167fa944c7960d54d65de10e8c94b76395d0a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index eefeb1cdc2ee506002b2b170cb3e29045531b51e..245a99f69641d5d5fd6a4c988a1220f9489cce01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, } } - tun_dst = tun_rx_dst(enc_opts.key.len); + if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst, + key.enc_ip.tos, key.enc_ip.ttl, + key.enc_tp.dst, TUNNEL_KEY, + key32_to_tunnel_id(key.enc_key_id.keyid), + enc_opts.key.len); + } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst, + key.enc_ip.tos, key.enc_ip.ttl, + key.enc_tp.dst, 0, TUNNEL_KEY, + key32_to_tunnel_id(key.enc_key_id.keyid), + enc_opts.key.len); + } else { + netdev_dbg(priv->netdev, + "Couldn't restore tunnel, unsupported addr_type: %d\n", + key.enc_control.addr_type); + return false; + } + if (!tun_dst) { - WARN_ON_ONCE(true); + netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n"); return false; } - ip_tunnel_key_init(&tun_dst->u.tun_info.key, - key.enc_ipv4.src, key.enc_ipv4.dst, - key.enc_ip.tos, key.enc_ip.ttl, - 0, /* label */ - key.enc_tp.src, key.enc_tp.dst, - key32_to_tunnel_id(key.enc_key_id.keyid), - TUNNEL_KEY); + tun_dst->u.tun_info.key.tp_src = key.enc_tp.src; if (enc_opts.key.len) ip_tunnel_info_opts_set(&tun_dst->u.tun_info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 430025550fad2ba7c222dc4b0ae5c81ec8211d2d..aad1c29b23db102e0f858327e7769c3cc445f0ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) struct mlx5_ct_entry *entry = ptr; mlx5_tc_ct_entry_del_rules(ct_priv, entry); + kfree(entry); } static void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c index 951ea26d96bc3f0cc27cfb13c8a9b181e18bb64a..e472ed0eacfbc72b6e9e15a7bc7539a6bbe57df7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c @@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB); } + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c 
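The rep/bond.c fix above is purely an ordering problem: netdev_priv() was dereferenced, and its ppriv treated as a representor private, before the code had verified that the netdev really is an mlx5 eswitch representor, so an unrelated LAG slave could be misinterpreted. The safe shape is classify first, cast second; in sketch form (the predicate is a stand-in for the driver's own check):

#include <linux/netdevice.h>
#include <linux/types.h>

struct my_rep_priv {
        int vport;
};

/* Assumed driver-specific test that does not touch netdev_priv(). */
bool my_netdev_is_rep(const struct net_device *netdev);

static bool my_rep_is_uplink(struct net_device *netdev)
{
        struct my_rep_priv *rpriv;

        /* Classify the netdev before interpreting its private area. */
        if (!my_netdev_is_rep(netdev))
                return false;

        rpriv = netdev_priv(netdev);
        return rpriv->vport == 0;       /* 0 stands in for the uplink vport */
}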
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c index 58b13192df23945394448031eb99524812333086..2805416c32a3cbfa373c0b08706d88f182ff4669 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c @@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, gre_key.key, be32_to_cpu(enc_keyid.key->keyid)); } + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 37b176801bccbd5e700f6e2cd9630590c3edfb02..038a0f1cecec63eb3199306d75fd888bde24dd12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, be32_to_cpu(enc_keyid.key->keyid)); + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index bc102d094bbd127212af43d3793ab3c08794c5f1..d20243d6a0326000643b6411d185354bf7bb0655 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) return 0; } +#define MLX5E_BUFFER_CELL_SHIFT 7 + +static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {}; + + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) + return (1 << MLX5E_BUFFER_CELL_SHIFT); + + if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), + MLX5_REG_SBCAM, 0, 0)) + return (1 << MLX5E_BUFFER_CELL_SHIFT); + + return MLX5_GET(sbcam_reg, out, cap_cell_size); +} + void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) { struct mlx5e_dcbx *dcbx = &priv->dcbx; @@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) priv->dcbx.cap |= DCB_CAP_DCBX_HOST; + priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv); priv->dcbx.manual_buffer = false; priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ec5658bbe3c57e169fc57d28811702b33394527f..c2464c349117e45bf6f50fc5d5e54c782dc0b4ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, struct ptys2ethtool_config **arr, u32 *size) { - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; *size = ext ? 
ARRAY_SIZE(ptys2ext_ethtool_table) : @@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; - bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + bool ext = mlx5e_ptys_ext_supported(mdev); ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); } @@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, __func__, err); goto err_query_regs; } - ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability); eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_capability); eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, @@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, autoneg = link_ksettings->base.autoneg; speed = link_ksettings->base.speed; - ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + ext_supported = mlx5e_ptys_ext_supported(mdev); ext = ext_requested(autoneg, adver, ext_supported); if (!ext_supported && ext) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a836a02a2116609eebaa64279bb0b659d42ada8d..3b892ec301b4a487201c6b5beab3799cd0a80180 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; @@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; } +static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, + enum mlx5_port_status state) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int vport_admin_state; + + mlx5_set_port_admin_status(mdev, state); + + if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) + return; + + if (state == MLX5_PORT_UP) + vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO; + else + vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN; + + mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3101,12 +3120,9 @@ int mlx5e_open(struct net_device *netdev) mutex_lock(&priv->state_lock); err = mlx5e_open_locked(netdev); if (!err) - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); - if (mlx5_vxlan_allowed(priv->mdev->vxlan)) - udp_tunnel_get_rx_info(netdev); - return err; } @@ -3138,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev) return -ENODEV; mutex_lock(&priv->state_lock); - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN); err = mlx5e_close_locked(netdev); mutex_unlock(&priv->state_lock); @@ -5121,6 +5137,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) if (err) goto 
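The mlx5e_alloc_rq hunks replace two early 'return err' statements with jumps to the existing err_rq_wq_destroy label so that whatever was set up before the failing step gets released. The idiom in miniature (names hypothetical):

#include <linux/errno.h>
#include <linux/slab.h>

struct two_bufs {
        void *a;
        void *b;
};

static int two_bufs_init(struct two_bufs *c)
{
        int err;

        c->a = kzalloc(64, GFP_KERNEL);
        if (!c->a)
                return -ENOMEM;

        c->b = kzalloc(64, GFP_KERNEL);
        if (!c->b) {
                err = -ENOMEM;
                goto err_free_a;        /* unwind instead of bare return */
        }

        return 0;

err_free_a:
        kfree(c->a);
        return err;
}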
err_destroy_flow_steering; +#ifdef CONFIG_MLX5_EN_ARFS + priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev); +#endif + return 0; err_destroy_flow_steering: @@ -5181,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) /* Marking the link as currently not needed by the Driver */ if (!netif_running(netdev)) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN); mlx5e_set_netdev_mtu_boundaries(priv); mlx5e_set_dev_port_mtu(priv); @@ -5202,6 +5222,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(netdev)) mlx5e_open(netdev); + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) + udp_tunnel_get_rx_info(netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -5216,6 +5238,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); + if (mlx5_vxlan_allowed(priv->mdev->vxlan)) + udp_tunnel_drop_rx_info(priv->netdev); netif_device_detach(priv->netdev); rtnl_unlock(); @@ -5288,10 +5312,6 @@ int mlx5e_netdev_init(struct net_device *netdev, /* netdev init */ netif_carrier_off(netdev); -#ifdef CONFIG_MLX5_EN_ARFS - netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev); -#endif - return 0; err_free_cpumask: @@ -5389,6 +5409,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) profile->cleanup_tx(priv); out: + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + cancel_work_sync(&priv->update_stats_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 006807e04eda330f8e73a87d607db0b5604aed87..9519a61bd8ec55db301882a2ccbe4d16535ed4bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -936,6 +936,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { + mlx5e_ethtool_cleanup_steering(priv); rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); @@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) mlx5e_rep_tc_enable(priv); + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK, + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO); mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7fc84f58e28a4f9d96408aeb59dcbaed4cc84b60..fcedb5bdca9e57bda7d9c52cca8f910954ff6175 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, match.key->vlan_priority); *match_level = MLX5_MATCH_L2; + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } } @@ -4670,9 +4671,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev, struct mlx5e_rep_priv *rpriv) { /* Offloaded flow rule is allowed to duplicate on non-uplink representor - * sharing tc block with other slaves of a lag device. + * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this + * function is called from NIC mode. 
*/ - return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK; + return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK; } int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, @@ -4686,13 +4688,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, rcu_read_lock(); flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); - rcu_read_unlock(); if (flow) { /* Same flow rule offloaded to non-uplink representor sharing tc block, * just return 0. */ if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev) - goto out; + goto rcu_unlock; NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring"); @@ -4700,8 +4701,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, "flow cookie %lx already exists, ignoring\n", f->cookie); err = -EEXIST; - goto out; + goto rcu_unlock; } +rcu_unlock: + rcu_read_unlock(); + if (flow) + goto out; trace_mlx5e_configure_flower(f); err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 5dc335e621c577898a52d9ee0380275c38bada7f..b68976b378b81a53d778af04ee50b754f7a29f2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, } /* Create ingress allow rule */ - memset(spec, 0, sizeof(*spec)); spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW; vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1116ab9bea6c579069f1e874997d965c7c7bc568..43005caff09e197e93021b8e7bbca9be2fa8a32d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1608,7 +1608,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } - + esw_destroy_tsar(esw); return err; } @@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) else if (esw->mode == MLX5_ESWITCH_OFFLOADS) esw_offloads_disable(esw); - esw_destroy_tsar(esw); - old_mode = esw->mode; esw->mode = MLX5_ESWITCH_NONE; @@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); } + esw_destroy_tsar(esw); + if (clear_vf) mlx5_eswitch_clear_vf_vports_info(esw); } @@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT; + int other_vport = 1; int err = 0; if (!ESW_ALLOWED(esw)) @@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, if (IS_ERR(evport)) return PTR_ERR(evport); + if (vport == MLX5_VPORT_UPLINK) { + opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK; + other_vport = 0; + vport = 0; + } mutex_lock(&esw->state_lock); - err = mlx5_modify_vport_admin_state(esw->dev, - MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport, 1, 
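The mlx5e_configure_flower change keeps the RCU read-side section open until the looked-up flow is no longer dereferenced; previously rcu_read_unlock() was issued right after rhashtable_lookup() and flow->orig_dev was read afterwards. The rule is that an object returned by an RCU-protected lookup may only be used inside the same read-side section (or after taking a reference); for example, with a hypothetical table and object:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>

struct flow_obj {
        unsigned long cookie;
        struct net_device *orig_dev;
        struct rhash_head node;
};

static bool flow_same_dev(struct rhashtable *ht,
                          const struct rhashtable_params params,
                          unsigned long cookie, struct net_device *dev)
{
        struct flow_obj *flow;
        bool same = false;

        rcu_read_lock();
        flow = rhashtable_lookup(ht, &cookie, params);
        if (flow)
                same = (flow->orig_dev == dev); /* still under rcu_read_lock */
        rcu_read_unlock();

        return same;
}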
link_state); + err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state); if (err) { - mlx5_core_warn(esw->dev, - "Failed to set vport %d link state, err = %d", - vport, err); + mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d", + vport, opmod, err); goto unlock; } @@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); int err = 0; - if (!ESW_ALLOWED(esw)) - return -EPERM; if (IS_ERR(evport)) return PTR_ERR(evport); if (vlan > 4095 || qos > 7) @@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, u8 set_flags = 0; int err; + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (vlan || qos) set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a5175e98c0b348539896535d02492f5c27a6c67b..5785596f13f5b1663ae2b8b04b40f2a0f0a767e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {} static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } +static inline +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) { return ERR_PTR(-EOPNOTSUPP); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 060354bb211adeaeb90c90caf9d64d3461dc8b7b..ed75353c56b8587807718e5ed7aa51573a3d31c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, return &esw->offloads.vport_reps[idx]; } +static void +mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) +{ + if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && + attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; +} static void mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, @@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, mlx5_eswitch_get_vport_metadata_mask()); spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc))) - spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } else { misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); @@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw, spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; } - - if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) && - attr->in_rep->vport == MLX5_VPORT_UPLINK) - spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; } struct mlx5_flow_handle * @@ 
-396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, goto err_esw_get; } + mlx5_eswitch_set_rule_flow_source(esw, spec, attr); + if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr, &flow_act, dest, i); @@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, i++; mlx5_eswitch_set_rule_source_port(esw, spec, attr); + mlx5_eswitch_set_rule_flow_source(esw, spec, attr); if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 13e2fb79c21ae6eba61735c4363bbff1bf6d5a06..2569bb6228b65a5dba498c0d3138ae0d0a49c2a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, return ft; } -/* If reverse if false then return the first flow table in next priority of +/* If reverse is false then return the first flow table in next priority of * prio in the tree, else return the last flow table in the previous priority * of prio in the tree. */ @@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) return find_closest_ft(prio, true); } -static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root, - struct mlx5_flow_namespace *ns) -{ - struct mlx5_flow_namespace *root_ns = &root->ns; - struct fs_prio *iter_prio; - struct fs_prio *prio; - - fs_get_obj(prio, ns->node.parent); - list_for_each_entry(iter_prio, &root_ns->node.children, node.list) { - if (iter_prio == prio && - !list_is_last(&prio->node.children, &iter_prio->node.list)) - return list_next_entry(iter_prio, node.list); - } - return NULL; -} - static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, struct mlx5_flow_act *flow_act) { - struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct fs_prio *prio; + bool next_ns; - if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) - prio = find_fwd_ns_prio(root, ft->ns); - else - fs_get_obj(prio, ft->node.parent); + next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; + fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent); - return (prio) ? 
find_next_chained_ft(prio) : NULL; + return find_next_chained_ft(prio); } static int connect_fts_in_prio(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index ef0706d15a5b7a29625470c69329f140513f699e..2d55b7c22c0348364354d4700d195f0de00d1f86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, if (rq->extts.index >= clock->ptp_info.n_pins) return -EINVAL; + pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + if (on) { - pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); - if (pin < 0) - return -EBUSY; pin_mode = MLX5_PIN_MODE_IN; pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); field_select = MLX5_MTPPS_FS_PIN_MODE | MLX5_MTPPS_FS_PATTERN | MLX5_MTPPS_FS_ENABLE; } else { - pin = rq->extts.index; field_select = MLX5_MTPPS_FS_ENABLE; } @@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, if (rq->perout.index >= clock->ptp_info.n_pins) return -EINVAL; - if (on) { - pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, - rq->perout.index); - if (pin < 0) - return -EBUSY; + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + if (on) { pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; @@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, MLX5_MTPPS_FS_ENABLE | MLX5_MTPPS_FS_TIME_STAMP; } else { - pin = rq->perout.index; field_select = MLX5_MTPPS_FS_ENABLE; } @@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? 
-EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { @@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = { .verify = NULL, }; +static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin, + u32 *mtpps, u32 mtpps_size) +{ + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {}; + + MLX5_SET(mtpps_reg, in, pin, pin); + + return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps, + mtpps_size, MLX5_REG_MTPPS, 0, 0); +} + +static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) +{ + struct mlx5_core_dev *mdev = clock->mdev; + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {}; + u8 mode; + int err; + + err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out)); + if (err || !MLX5_GET(mtpps_reg, out, enable)) + return PTP_PF_NONE; + + mode = MLX5_GET(mtpps_reg, out, pin_mode); + + if (mode == MLX5_PIN_MODE_IN) + return PTP_PF_EXTTS; + else if (mode == MLX5_PIN_MODE_OUT) + return PTP_PF_PEROUT; + + return PTP_PF_NONE; +} + static int mlx5_init_pin_config(struct mlx5_clock *clock) { int i; @@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock) sizeof(clock->ptp_info.pin_config[i].name), "mlx5_pps%d", i); clock->ptp_info.pin_config[i].index = i; - clock->ptp_info.pin_config[i].func = PTP_PF_NONE; - clock->ptp_info.pin_config[i].chan = i; + clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i); + clock->ptp_info.pin_config[i].chan = 0; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 9f829e68fc7344ccff938e4629513adb99784083..e4186e84b3ffbbbec4aa339a05dbd9d0b871309f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) return 0; } -static int mlx5_eeprom_page(int offset) +static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num, + u8 *module_id) +{ + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; + int err, status; + u8 *ptr; + + MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW); + MLX5_SET(mcia_reg, in, module, module_num); + MLX5_SET(mcia_reg, in, device_address, 0); + MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, size, 1); + MLX5_SET(mcia_reg, in, l, 0); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); + if (err) + return err; + + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + + *module_id = ptr[0]; + + return 0; +} + +static int mlx5_qsfp_eeprom_page(u16 offset) { if (offset < MLX5_EEPROM_PAGE_LENGTH) /* Addresses between 0-255 - page 00 */ @@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset) MLX5_EEPROM_HIGH_PAGE_LENGTH); } -static int mlx5_eeprom_high_page_offset(int page_num) +static int mlx5_qsfp_eeprom_high_page_offset(int page_num) { if (!page_num) /* Page 0 always start from low page */ 
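The lib/clock.c changes do three related things: ptp_find_pin() is now called even when a request disables a function (so the pin is validated either way), each pin's function is restored from the firmware MTPPS pin_mode at init, and .verify only accepts PTP_PF_EXTTS/PTP_PF_PEROUT on pins whose capability bits advertise PPS-in/PPS-out. A trimmed .verify along those lines, using the same per-pin capability idea (structure and mask names are illustrative):

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/ptp_clock_kernel.h>

#define PIN_CAP_PPS_IN  BIT(0)
#define PIN_CAP_PPS_OUT BIT(1)

struct my_clock {
        struct ptp_clock_info ptp_info;
        u32 pin_caps[8];        /* per-pin capability bits from firmware */
};

static int my_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
                         enum ptp_pin_function func, unsigned int chan)
{
        struct my_clock *clock = container_of(ptp, struct my_clock, ptp_info);

        switch (func) {
        case PTP_PF_NONE:
                return 0;
        case PTP_PF_EXTTS:
                /* non-zero (reject) unless the pin supports PPS input */
                return !(clock->pin_caps[pin] & PIN_CAP_PPS_IN);
        case PTP_PF_PEROUT:
                return !(clock->pin_caps[pin] & PIN_CAP_PPS_OUT);
        default:
                return -EOPNOTSUPP;
        }
}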
return 0; @@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num) return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH; } +static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = mlx5_qsfp_eeprom_page(*offset); + *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num); +} + +static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = MLX5_I2C_ADDR_LOW; + *page_num = 0; + + if (*offset < MLX5_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = MLX5_I2C_ADDR_HIGH; + *offset -= MLX5_EEPROM_PAGE_LENGTH; +} + int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, u16 offset, u16 size, u8 *data) { - int module_num, page_num, status, err; + int module_num, status, err, page_num = 0; + u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {}; u32 out[MLX5_ST_SZ_DW(mcia_reg)]; - u32 in[MLX5_ST_SZ_DW(mcia_reg)]; - u16 i2c_addr; - void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + u16 i2c_addr = 0; + u8 module_id; + void *ptr; err = mlx5_query_module_num(dev, &module_num); if (err) return err; - memset(in, 0, sizeof(in)); - size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); - - /* Get the page number related to the given offset */ - page_num = mlx5_eeprom_page(offset); + err = mlx5_query_module_id(dev, module_num, &module_id); + if (err) + return err; - /* Set the right offset according to the page number, - * For page_num > 0, relative offset is always >= 128 (high page). - */ - offset -= mlx5_eeprom_high_page_offset(page_num); + switch (module_id) { + case MLX5_MODULE_ID_SFP: + mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + case MLX5_MODULE_ID_QSFP: + case MLX5_MODULE_ID_QSFP_PLUS: + case MLX5_MODULE_ID_QSFP28: + mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset); + break; + default: + mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } if (offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; - i2c_addr = MLX5_I2C_ADDR_LOW; + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); MLX5_SET(mcia_reg, in, l, 0); MLX5_SET(mcia_reg, in, module, module_num); @@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, return -EIO; } + ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); memcpy(data, ptr, size); return size; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e9ccd333f61dd88787fb6fca919758a04f45f52a..71b6185b4904288b34b8032dc79b398f34448849 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -710,7 +710,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); if (err) - return err; + goto err_trap_register; err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core); if (err) @@ -722,6 +722,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) err_emad_trap_set: mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, mlxsw_core); +err_trap_register: destroy_workqueue(mlxsw_core->emad_wq); return err; } @@ -1813,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; @@ -2050,11 +2051,13 @@ void 
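In mlxsw core.c the failed EMAD transaction is now released with kfree_rcu() instead of kfree(), because by that point the transaction may already be visible on an RCU-protected list and a concurrent reader could still hold a pointer to it. The pattern needs an rcu_head embedded in the object; roughly:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct trans {
        struct list_head list;  /* linked on an RCU-protected list */
        struct rcu_head rcu;
        u64 tid;
};

static void trans_destroy(struct trans *t)
{
        /* Unpublish first... */
        list_del_rcu(&t->list);
        /* ...then defer the free until current RCU readers are done. */
        kfree_rcu(t, rcu);
}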
mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 08215fed193d309c64bf18286d2881afe4948527..a7d86df7123ff2ee4b3b9674eb56b5360ed28b1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -45,7 +45,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, static int mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, u16 offset, u16 size, void *data, - unsigned int *p_read_size) + bool qsfp, unsigned int *p_read_size) { char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; char mcia_pl[MLXSW_REG_MCIA_LEN]; @@ -54,6 +54,10 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, int status; int err; + /* MCIA register accepts buffer size <= 48. Page of size 128 should be + * read by chunks of size 48, 48, 32. Align the size of the last chunk + * to avoid reading after the end of the page. + */ size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE); if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH && @@ -63,18 +67,25 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW; if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) { - page = MLXSW_REG_MCIA_PAGE_GET(offset); - offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; - /* When reading upper pages 1, 2 and 3 the offset starts at - * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436 - * specification for graphical depiction. - * MCIA register accepts buffer size <= 48. Page of size 128 - * should be read by chunks of size 48, 48, 32. Align the size - * of the last chunk to avoid reading after the end of the - * page. - */ - if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) - size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; + if (qsfp) { + /* When reading upper pages 1, 2 and 3 the offset + * starts at 128. Please refer to "QSFP+ Memory Map" + * figure in SFF-8436 specification for graphical + * depiction. + */ + page = MLXSW_REG_MCIA_PAGE_GET(offset); + offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; + if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) + size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; + } else { + /* When reading upper pages 1, 2 and 3 the offset + * starts at 0 and I2C high address is used. Please refer + * refer to "Memory Organization" figure in SFF-8472 + * specification for graphical depiction. 
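Both the mlx5 port.c and mlxsw core_env.c hunks make the EEPROM read path module-aware: QSFP modules keep the SFF-8436 layout (128-byte upper pages addressed from offset 128, all on the low I2C address), while SFP/SFP+ modules follow SFF-8472, where bytes at linear offset 256 and above live on the high I2C address with the offset rebased to zero. The address math can be checked in isolation; 0x50/0x51 are the conventional low/high module addresses and the page/offset formulas mirror the driver helpers above:

#include <stdint.h>
#include <stdio.h>

#define I2C_ADDR_LOW    0x50
#define I2C_ADDR_HIGH   0x51
#define PAGE_LEN        256     /* bytes reachable via the low address */
#define UP_PAGE_LEN     128     /* size of each QSFP upper page */

/* QSFP (SFF-8436): everything on 0x50; upper pages start at offset 128. */
static void qsfp_params(uint16_t off, uint16_t *i2c, int *page, uint16_t *dev_off)
{
        *i2c = I2C_ADDR_LOW;
        *page = off < PAGE_LEN ? 0 : 1 + (off - PAGE_LEN) / UP_PAGE_LEN;
        *dev_off = *page ? off - *page * UP_PAGE_LEN : off;
}

/* SFP (SFF-8472): bytes 256+ move to 0x51, rebased to offset 0. */
static void sfp_params(uint16_t off, uint16_t *i2c, int *page, uint16_t *dev_off)
{
        *page = 0;
        *i2c = off < PAGE_LEN ? I2C_ADDR_LOW : I2C_ADDR_HIGH;
        *dev_off = off < PAGE_LEN ? off : off - PAGE_LEN;
}

int main(void)
{
        uint16_t i2c, dev_off;
        int page;

        qsfp_params(300, &i2c, &page, &dev_off);
        printf("QSFP 300 -> i2c 0x%x page %d off %u\n", i2c, page, dev_off);
        /* i2c 0x50 page 1 off 172 */

        sfp_params(300, &i2c, &page, &dev_off);
        printf("SFP  300 -> i2c 0x%x page %d off %u\n", i2c, page, dev_off);
        /* i2c 0x51 page 0 off 44 */
        return 0;
}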
+ */ + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH; + offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH; + } } mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr); @@ -166,7 +177,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, int err; err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset, - module_info, &read_size); + module_info, false, &read_size); if (err) return err; @@ -197,7 +208,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, /* Verify if transceiver provides diagnostic monitoring page */ err = mlxsw_env_query_module_eeprom(mlxsw_core, module, SFP_DIAGMON, 1, &diag_mon, - &read_size); + false, &read_size); if (err) return err; @@ -225,17 +236,22 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, int offset = ee->offset; unsigned int read_size; int i = 0; + bool qsfp; int err; if (!ee->len) return -EINVAL; memset(data, 0, ee->len); + /* Validate module identifier value. */ + err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp); + if (err) + return err; while (i < ee->len) { err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset, ee->len - i, data + i, - &read_size); + qsfp, &read_size); if (err) { netdev_err(netdev, "Eeprom query failed\n"); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index fd0e97de44e7a4063da82b5c3e80ff4c7a231ae9..c04ec1a92826024654a325f3eb58c96e817a87c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1414,23 +1414,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, u16 num_pages; int err; - mutex_init(&mlxsw_pci->cmd.lock); - init_waitqueue_head(&mlxsw_pci->cmd.wait); - mlxsw_pci->core = mlxsw_core; mbox = mlxsw_cmd_mbox_alloc(); if (!mbox) return -ENOMEM; - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); - if (err) - goto mbox_put; - - err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - if (err) - goto err_out_mbox_alloc; - err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); if (err) goto err_sw_reset; @@ -1537,9 +1526,6 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, mlxsw_pci_free_irq_vectors(mlxsw_pci); err_alloc_irq: err_sw_reset: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); -err_out_mbox_alloc: - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); mbox_put: mlxsw_cmd_mbox_free(mbox); return err; @@ -1553,8 +1539,6 @@ static void mlxsw_pci_fini(void *bus_priv) mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); - mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } static struct mlxsw_pci_queue * @@ -1776,6 +1760,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; +static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + mutex_init(&mlxsw_pci->cmd.lock); + init_waitqueue_head(&mlxsw_pci->cmd.wait); + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + if (err) + goto err_in_mbox_alloc; + + err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + if (err) + goto err_out_mbox_alloc; + + return 0; + +err_out_mbox_alloc: + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); +err_in_mbox_alloc: + mutex_destroy(&mlxsw_pci->cmd.lock); + return err; +} + +static void mlxsw_pci_cmd_fini(struct mlxsw_pci 
*mlxsw_pci) +{ + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); + mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); + mutex_destroy(&mlxsw_pci->cmd.lock); +} + static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; @@ -1831,6 +1846,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); + err = mlxsw_pci_cmd_init(mlxsw_pci); + if (err) + goto err_pci_cmd_init; + mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1848,6 +1867,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: + mlxsw_pci_cmd_fini(mlxsw_pci); +err_pci_cmd_init: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1865,6 +1886,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); + mlxsw_pci_cmd_fini(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index fcb88d4271bfdeb5172f44d3fd5d8057ea2165c1..8ac987c8c8bc39f6242d674dae2787ffefe5452f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST, MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 770de0222e7bd06eed95a3bbb6c558b5f72c210b..0521e9d48c457df8c3b1a5469e176cd4c07b45aa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) { - /* Packets with link-local destination IP arriving to the router - * are trapped to the CPU, so no need to program specific routes - * for them. Only allow prefix routes (usually one fe80::/64) so - * that packets are trapped for the right reason. - */ - if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) && - (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))) - return true; - /* Multicast routes aren't supported, so ignore them. Neighbour * Discovery packets are specifically trapped. 
*/ @@ -6262,7 +6253,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (WARN_ON(!fib_work)) + if (!fib_work) return NOTIFY_BAD; fib_work->mlxsw_sp = router->mlxsw_sp; @@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->router = router; router->mlxsw_sp = mlxsw_sp; - router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; - err = register_inetaddr_notifier(&router->inetaddr_nb); - if (err) - goto err_register_inetaddr_notifier; - - router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event; - err = register_inet6addr_notifier(&router->inet6addr_nb); - if (err) - goto err_register_inet6addr_notifier; - INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) @@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_neigh_init; - mlxsw_sp->router->netevent_nb.notifier_call = - mlxsw_sp_router_netevent_event; - err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb); - if (err) - goto err_register_netevent_notifier; - err = mlxsw_sp_mp_hash_init(mlxsw_sp); if (err) goto err_mp_hash_init; @@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_dscp_init; + router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; + err = register_inetaddr_notifier(&router->inetaddr_nb); + if (err) + goto err_register_inetaddr_notifier; + + router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event; + err = register_inet6addr_notifier(&router->inet6addr_nb); + if (err) + goto err_register_inet6addr_notifier; + + mlxsw_sp->router->netevent_nb.notifier_call = + mlxsw_sp_router_netevent_event; + err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb); + if (err) + goto err_register_netevent_notifier; + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event; err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb, @@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, return 0; err_register_fib_notifier: -err_dscp_init: -err_mp_hash_init: unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); err_register_netevent_notifier: + unregister_inet6addr_notifier(&router->inet6addr_nb); +err_register_inet6addr_notifier: + unregister_inetaddr_notifier(&router->inetaddr_nb); +err_register_inetaddr_notifier: + mlxsw_core_flush_owq(); +err_dscp_init: +err_mp_hash_init: mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); @@ -8174,10 +8170,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); err_router_init: - unregister_inet6addr_notifier(&router->inet6addr_nb); -err_register_inet6addr_notifier: - unregister_inetaddr_notifier(&router->inetaddr_nb); -err_register_inetaddr_notifier: mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); return err; @@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb); unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); + unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); + mlxsw_core_flush_owq(); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); mlxsw_sp_mr_fini(mlxsw_sp); @@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_ipips_fini(mlxsw_sp); 
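The spectrum_router.c reorder registers the inetaddr/inet6addr/netevent notifiers only at the very end of mlxsw_sp_router_init(), once everything their callbacks touch exists, and unregisters them first in _fini(), followed by mlxsw_core_flush_owq() so work they queued is drained before the state is freed. The general rule: expose callbacks last, withdraw them first, flush deferred work in between. In outline (the event handler and the flush call are stand-ins):

#include <linux/inetdevice.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

struct my_router {
        struct notifier_block inetaddr_nb;
        /* ...tables and caches the notifier callback relies on... */
};

int my_inetaddr_event(struct notifier_block *nb, unsigned long event, void *ptr);

static int my_router_init(struct my_router *router)
{
        int err;

        /* 1. Build all internal state first (elided). */

        /* 2. Only then let the rest of the kernel call back into it. */
        router->inetaddr_nb.notifier_call = my_inetaddr_event;
        err = register_inetaddr_notifier(&router->inetaddr_nb);
        if (err)
                return err;     /* caller unwinds the internal state */

        return 0;
}

static void my_router_fini(struct my_router *router)
{
        /* Reverse order: stop new events, drain queued work, free state. */
        unregister_inetaddr_notifier(&router->inetaddr_nb);
        flush_scheduled_work();         /* stand-in for the ordered-wq flush */
        /* ...free internal state (elided)... */
}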
mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); - unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); - unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 157a42c63066de73e3a018743c709013bd2132db..1e38dfe7cf64d3b2a8bdc98c0aa6f7329ac70b08 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = { { .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128), }, + { + .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512), + }, }; static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { @@ -421,6 +424,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, .priority = 2, }, + { + .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE, + .priority = 1, + }, { .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15), .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6, @@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { }, }, { - .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY, + .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY, TRAP), .listeners_arr = { - MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU, - false), + MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE, + TRAP_TO_CPU, false), }, }, { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 9cfe1fd98c3078f8a15eb25b12d7fdc0378073e7..f17da67a4622ee561073676a85a3412bccf3bfc1 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) spin_unlock_irqrestore(&port->tx_skbs.lock, flags); - /* Next ts */ - ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); + /* Get the h/w timestamp */ + ocelot_get_hwtimestamp(ocelot, &ts); if (unlikely(!skb_match)) continue; - /* Get the h/w timestamp */ - ocelot_get_hwtimestamp(ocelot, &ts); - /* Set the timestamp into the skb */ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); skb_tstamp_tx(skb_match, &shhwtstamps); dev_kfree_skb_any(skb_match); + + /* Next ts */ + ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); } } EXPORT_SYMBOL(ocelot_get_txtstamp); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 9b63574b6202681afb77bb3f0b59a02c638ed592..b5f1849fd280267f3116ff62af28599580448f0a 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -98,7 +98,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) { struct sk_buff **skb_ptr = NULL; struct sk_buff **temp; -#define NR_SKB_COMPLETED 128 +#define NR_SKB_COMPLETED 16 struct sk_buff *completed[NR_SKB_COMPLETED]; int more; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index d2708a57f2ffd8be65840a5985303b4c382a4ead..4075f5e599552c5eb21a79d832956d3ee6be4127 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev) netif_napi_add(ndev, 
&priv->napi, nixge_poll, NAPI_POLL_WEIGHT); err = nixge_of_get_resources(pdev); if (err) - return err; + goto free_netdev; __nixge_hw_set_mac_address(ndev); priv->tx_irq = platform_get_irq_byname(pdev, "tx"); if (priv->tx_irq < 0) { netdev_err(ndev, "could not find 'tx' irq"); - return priv->tx_irq; + err = priv->tx_irq; + goto free_netdev; } priv->rx_irq = platform_get_irq_byname(pdev, "rx"); if (priv->rx_irq < 0) { netdev_err(ndev, "could not find 'rx' irq"); - return priv->rx_irq; + err = priv->rx_irq; + goto free_netdev; } priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index f7e3ce3de04dde321c98d37d1932113dd0cdba03..095561924bdc13a3039edb5b42501da40a297743 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -103,15 +103,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ionic_lif *lif = netdev_priv(netdev); + unsigned int offset; unsigned int size; regs->version = IONIC_DEV_CMD_REG_VERSION; + offset = 0; size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); + offset += size; size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); - memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size); + memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); } static int ionic_get_link_ksettings(struct net_device *netdev, @@ -468,12 +471,18 @@ static void ionic_get_ringparam(struct net_device *netdev, ring->rx_pending = lif->nrxq_descs; } +static void ionic_set_ringsize(struct ionic_lif *lif, void *arg) +{ + struct ethtool_ringparam *ring = arg; + + lif->ntxq_descs = ring->tx_pending; + lif->nrxq_descs = ring->rx_pending; +} + static int ionic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ionic_lif *lif = netdev_priv(netdev); - bool running; - int err; if (ring->rx_mini_pending || ring->rx_jumbo_pending) { netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n"); @@ -491,22 +500,7 @@ static int ionic_set_ringparam(struct net_device *netdev, ring->rx_pending == lif->nrxq_descs) return 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - - running = test_bit(IONIC_LIF_F_UP, lif->state); - if (running) - ionic_stop(netdev); - - lif->ntxq_descs = ring->tx_pending; - lif->nrxq_descs = ring->rx_pending; - - if (running) - ionic_open(netdev); - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); - - return 0; + return ionic_reset_queues(lif, ionic_set_ringsize, ring); } static void ionic_get_channels(struct net_device *netdev, @@ -521,12 +515,17 @@ static void ionic_get_channels(struct net_device *netdev, ch->combined_count = lif->nxqs; } +static void ionic_set_queuecount(struct ionic_lif *lif, void *arg) +{ + struct ethtool_channels *ch = arg; + + lif->nxqs = ch->combined_count; +} + static int ionic_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct ionic_lif *lif = netdev_priv(netdev); - bool running; - int err; if (!ch->combined_count || ch->other_count || ch->rx_count || ch->tx_count) @@ -535,21 +534,7 @@ static int ionic_set_channels(struct net_device *netdev, if (ch->combined_count == lif->nxqs) return 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - - running = 
test_bit(IONIC_LIF_F_UP, lif->state); - if (running) - ionic_stop(netdev); - - lif->nxqs = ch->combined_count; - - if (running) - ionic_open(netdev); - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); - - return 0; + return ionic_reset_queues(lif, ionic_set_queuecount, ch); } static u32 ionic_get_priv_flags(struct net_device *netdev) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index aaa00edd9d5b088152375b85e69179cd33bce5cf..e55d41546cff2d0ce81f615fda138c78623336b7 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -96,8 +96,7 @@ static void ionic_link_status_check(struct ionic_lif *lif) u16 link_status; bool link_up; - if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) || - test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state)) + if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state)) return; link_status = le16_to_cpu(lif->info->status.link_status); @@ -114,16 +113,22 @@ static void ionic_link_status_check(struct ionic_lif *lif) netif_carrier_on(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + mutex_lock(&lif->queue_lock); ionic_start_queues(lif); + mutex_unlock(&lif->queue_lock); + } } else { if (netif_carrier_ok(netdev)) { netdev_info(netdev, "Link down\n"); netif_carrier_off(netdev); } - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); + mutex_unlock(&lif->queue_lock); + } } clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state); @@ -863,8 +868,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) if (f) return 0; - netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr, - ctx.comp.rx_filter_add.filter_id); + netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); err = ionic_adminq_post_wait(lif, &ctx); @@ -893,6 +897,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) return -ENOENT; } + netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", + addr, f->filter_id); + ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); spin_unlock_bh(&lif->rx_filters.lock); @@ -901,9 +908,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) if (err && err != -EEXIST) return err; - netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, - ctx.cmd.rx_filter_del.filter_id); - return 0; } @@ -1313,7 +1317,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu) return err; netdev->mtu = new_mtu; - err = ionic_reset_queues(lif); + err = ionic_reset_queues(lif, NULL, NULL); return err; } @@ -1325,7 +1329,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws) netdev_info(lif->netdev, "Tx Timeout recovery\n"); rtnl_lock(); - ionic_reset_queues(lif); + ionic_reset_queues(lif, NULL, NULL); rtnl_unlock(); } @@ -1351,13 +1355,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, }; int err; + netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid); err = ionic_adminq_post_wait(lif, &ctx); if (err) return err; - netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid, - ctx.comp.rx_filter_add.filter_id); - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); } @@ -1382,8 +1384,8 @@ static int 
ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, return -ENOENT; } - netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid, - le32_to_cpu(ctx.cmd.rx_filter_del.filter_id)); + netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", + vid, f->filter_id); ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); @@ -1673,6 +1675,14 @@ int ionic_open(struct net_device *netdev) if (err) goto err_out; + err = netif_set_real_num_tx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + + err = netif_set_real_num_rx_queues(netdev, lif->nxqs); + if (err) + goto err_txrx_deinit; + /* don't start the queues until we have link */ if (netif_carrier_ok(netdev)) { err = ionic_start_queues(lif); @@ -1980,26 +1990,30 @@ static const struct net_device_ops ionic_netdev_ops = { .ndo_get_vf_stats = ionic_get_vf_stats, }; -int ionic_reset_queues(struct ionic_lif *lif) +int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) { bool running; int err = 0; - err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET); - if (err) - return err; - + mutex_lock(&lif->queue_lock); running = netif_running(lif->netdev); if (running) { netif_device_detach(lif->netdev); err = ionic_stop(lif->netdev); + if (err) + goto reset_out; } - if (!err && running) { - ionic_open(lif->netdev); + + if (cb) + cb(lif, arg); + + if (running) { + err = ionic_open(lif->netdev); netif_device_attach(lif->netdev); } - clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state); +reset_out: + mutex_unlock(&lif->queue_lock); return err; } @@ -2146,7 +2160,9 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif) if (test_bit(IONIC_LIF_F_UP, lif->state)) { dev_info(ionic->dev, "Surprise FW stop, stopping queues\n"); + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); + mutex_unlock(&lif->queue_lock); } if (netif_running(lif->netdev)) { @@ -2265,15 +2281,15 @@ static void ionic_lif_deinit(struct ionic_lif *lif) cancel_work_sync(&lif->deferred.work); cancel_work_sync(&lif->tx_timeout_work); ionic_rx_filters_deinit(lif); + if (lif->netdev->features & NETIF_F_RXHASH) + ionic_lif_rss_deinit(lif); } - if (lif->netdev->features & NETIF_F_RXHASH) - ionic_lif_rss_deinit(lif); - napi_disable(&lif->adminqcq->napi); ionic_lif_qcq_deinit(lif, lif->notifyqcq); ionic_lif_qcq_deinit(lif, lif->adminqcq); + mutex_destroy(&lif->queue_lock); ionic_lif_reset(lif); } @@ -2450,6 +2466,7 @@ static int ionic_lif_init(struct ionic_lif *lif) return err; lif->hw_index = le16_to_cpu(comp.hw_index); + mutex_init(&lif->queue_lock); /* now that we have the hw_index we can figure out our doorbell page */ lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index c3428034a17b2612b7eac23134ae6b5d19c76f27..8dc2c5d77424cbacf54b288be90b06bcff2b4911 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -135,7 +135,6 @@ enum ionic_lif_state_flags { IONIC_LIF_F_SW_DEBUG_STATS, IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, - IONIC_LIF_F_QUEUE_RESET, IONIC_LIF_F_FW_RESET, /* leave this as last */ @@ -165,6 +164,7 @@ struct ionic_lif { unsigned int hw_index; unsigned int kern_pid; u64 __iomem *kern_dbpage; + struct mutex queue_lock; /* lock for queue structures */ spinlock_t adminq_lock; /* lock for AdminQ operations */ struct ionic_qcq *adminqcq; struct ionic_qcq *notifyqcq; @@ -213,12 +213,6 @@ struct ionic_lif { #define 
lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q) #define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q) -/* return 0 if successfully set the bit, else non-zero */ -static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname) -{ - return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE); -} - static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) { u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); @@ -248,6 +242,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units) return (units * div) / mult; } +typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg); + void ionic_link_status_check_request(struct ionic_lif *lif); void ionic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *ns); @@ -267,7 +263,7 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, int ionic_open(struct net_device *netdev); int ionic_stop(struct net_device *netdev); -int ionic_reset_queues(struct ionic_lif *lif); +int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg); static inline void debug_stats_txq_post(struct ionic_qcq *qcq, struct ionic_txq_desc *desc, bool dbell) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index 80eeb7696e0144768994509c26c1ed8e55e4e9c4..cd0076fc3044e20ea816c933f8c88afecb5e58eb 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -21,13 +21,16 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f) void ionic_rx_filter_replay(struct ionic_lif *lif) { struct ionic_rx_filter_add_cmd *ac; + struct hlist_head new_id_list; struct ionic_admin_ctx ctx; struct ionic_rx_filter *f; struct hlist_head *head; struct hlist_node *tmp; + unsigned int key; unsigned int i; int err; + INIT_HLIST_HEAD(&new_id_list); ac = &ctx.cmd.rx_filter_add; for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { @@ -58,9 +61,30 @@ void ionic_rx_filter_replay(struct ionic_lif *lif) ac->mac.addr); break; } + spin_lock_bh(&lif->rx_filters.lock); + ionic_rx_filter_free(lif, f); + spin_unlock_bh(&lif->rx_filters.lock); + + continue; } + + /* remove from old id list, save new id in tmp list */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_del(&f->by_id); + spin_unlock_bh(&lif->rx_filters.lock); + f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id); + hlist_add_head(&f->by_id, &new_id_list); } } + + /* rebuild the by_id hash lists with the new filter ids */ + spin_lock_bh(&lif->rx_filters.lock); + hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) { + key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK; + head = &lif->rx_filters.by_id[key]; + hlist_add_head(&f->by_id, head); + } + spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filters_init(struct ionic_lif *lif) @@ -69,10 +93,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif) spin_lock_init(&lif->rx_filters.lock); + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]); INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]); } + spin_unlock_bh(&lif->rx_filters.lock); return 0; } @@ -84,11 +110,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) struct hlist_node *tmp; unsigned int i; + spin_lock_bh(&lif->rx_filters.lock); for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { head = &lif->rx_filters.by_id[i]; hlist_for_each_entry_safe(f, tmp, head, by_id) ionic_rx_filter_free(lif, f); } + 
spin_unlock_bh(&lif->rx_filters.lock); } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, @@ -124,6 +152,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); + netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index b7f900c11834ec96fabc0b4236f2c3bc4fe7e54e..85eb8f276a3701e10a06b4b2876148ce4bdac40c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -161,12 +161,6 @@ static void ionic_rx_clean(struct ionic_queue *q, return; } - /* no packet processing while resetting */ - if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) { - stats->dropped++; - return; - } - stats->pkts++; stats->bytes += le16_to_cpu(comp->len); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a49743d56b9c02141f13b29663a8aeb33c6d5114..6c2f9ff4a53e21c10b8dac1e404c0b93159e1813 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -876,6 +876,8 @@ struct qed_dev { struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM]; u8 engine_for_debug; bool disable_ilt_dump; + bool dbg_bin_dump; + DECLARE_HASHTABLE(connections, 10); const struct firmware *firmware; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 08ba9d54ab63a5b7c295de8f4e24754e3ec16cdf..d13ec88313c386d9ef05a751453630480c9b3336 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2008,8 +2008,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, enum protocol_type proto; if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { - DP_NOTICE(p_hwfn, - "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 81e8fbe4a05bb58fcf7f05571d02ecbfe43a3e88..3b9bbafafe68bfd733a81c5bc097887507eed65a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -7506,6 +7506,12 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->print_dbg_data) qed_dbg_print_feature(text_buf, text_size_bytes); + /* Just return the original binary buffer if requested */ + if (p_hwfn->cdev->dbg_bin_dump) { + vfree(text_buf); + return DBG_STATUS_OK; + } + /* Free the old dump_buf and point the dump_buf to the newly allocated * and formatted text buffer.
*/ @@ -7733,7 +7739,9 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev) #define REGDUMP_HEADER_SIZE_SHIFT 0 #define REGDUMP_HEADER_SIZE_MASK 0xffffff #define REGDUMP_HEADER_FEATURE_SHIFT 24 -#define REGDUMP_HEADER_FEATURE_MASK 0x3f +#define REGDUMP_HEADER_FEATURE_MASK 0x1f +#define REGDUMP_HEADER_BIN_DUMP_SHIFT 29 +#define REGDUMP_HEADER_BIN_DUMP_MASK 0x1 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1 #define REGDUMP_HEADER_ENGINE_SHIFT 31 @@ -7771,6 +7779,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, feature, feature_size); SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1); SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); @@ -7794,6 +7803,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) omit_engine = 1; mutex_lock(&qed_dbg_lock); + cdev->dbg_bin_dump = true; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7931,6 +7941,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc); } + /* Re-populate nvm attribute info */ + qed_mcp_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_populate(p_hwfn); + /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + @@ -7993,6 +8007,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); } + cdev->dbg_bin_dump = false; mutex_unlock(&qed_dbg_lock); return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3aa51374e727f3fc2ceb0d5f90d38a925587e522..dbdac983ccde55aac270ed8dae95edbd1cb0d77c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3102,7 +3102,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } /* Log and clear previous pglue_b errors if such exist */ - qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, @@ -4472,12 +4472,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return 0; } -static void qed_nvm_info_free(struct qed_hwfn *p_hwfn) -{ - kfree(p_hwfn->nvm_info.image_att); - p_hwfn->nvm_info.image_att = NULL; -} - static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_regview, void __iomem *p_doorbells, @@ -4562,7 +4556,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err3: if (IS_LEAD_HWFN(p_hwfn)) - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); err2: if (IS_LEAD_HWFN(p_hwfn)) qed_iov_free_hw_info(p_hwfn->cdev); @@ -4623,7 +4617,7 @@ int qed_hw_prepare(struct qed_dev *cdev, if (rc) { if (IS_PF(cdev)) { qed_init_free(p_hwfn); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); qed_mcp_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); } @@ -4657,7 +4651,7 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); - qed_nvm_info_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); } static void qed_chain_free_next_ptr(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b7b974f0ef210e16e2661c8b2c834dd5c7aead32..5eec1fc6229d943c1a80a71aec0e79638385910b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -257,9 +257,10 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init) { + char msg[256]; u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); @@ -273,22 +274,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_NOTICE(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); + snprintf(msg, sizeof(msg), + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), + tmp, + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -321,8 +323,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); - if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) { + snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); + } tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { @@ -361,7 +369,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) { - return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } static int qed_fw_assertion(struct qed_hwfn *p_hwfn) @@ -1193,7 +1201,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, "MFW indication via attention\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index e09db33863670072e047b2d13304aa18a463afdc..110169e90121992c7112686596a8f48bba838ff4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ 
b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -442,7 +442,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) -int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt); +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9624616806e70c552c02af8da694d8ad0485337c..0fd4520d06661cd825741a09919ea116e7f1a70c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3280,6 +3280,13 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) return rc; } +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; + p_hwfn->nvm_info.valid = false; +} + int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 5750b4c5ef63720579940308ced8cdecea42bbce..12a705ed4bacc66fee865a92952b1e6ade2135dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1220,6 +1220,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); */ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); +/** + * @brief Delete nvm info shadow in the given hardware function + * + * @param p_hwfn + */ +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); + /** * @brief Get the engine affinity configuration. * diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 40efe60eff8d9e96f943a6b79459ede156479bed..fcdecddb2812298a8ceb228331bf2170f4a934df 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev) return 0; } -static int rmnet_register_real_device(struct net_device *real_dev) +static int rmnet_register_real_device(struct net_device *real_dev, + struct netlink_ext_ack *extack) { struct rmnet_port *port; int rc, entry; ASSERT_RTNL(); - if (rmnet_is_real_dev_registered(real_dev)) + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + if (port->rmnet_mode != RMNET_EPMODE_VND) { + NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + return -EINVAL; + } + return 0; + } port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) @@ -133,7 +141,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - err = rmnet_register_real_device(real_dev); + err = rmnet_register_real_device(real_dev, extack); if (err) goto err0; @@ -422,7 +430,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, } if (port->rmnet_mode != RMNET_EPMODE_VND) { - NL_SET_ERR_MSG_MOD(extack, "bridge device already exists"); + NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached"); return -EINVAL; } @@ -433,7 +441,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, return -EBUSY; } - err = rmnet_register_real_device(slave_dev); + err = rmnet_register_real_device(slave_dev, extack); if (err) return -EBUSY; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 
a442bcf64b9cd875be72ef085cacb5deb8ba0ba9..99f7aae102ce12a15155c84d888af04229e80c85 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1450,6 +1450,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct ravb_private *priv = container_of(work, struct ravb_private, work); struct net_device *ndev = priv->ndev; + int error; netif_tx_stop_all_queues(ndev); @@ -1458,15 +1459,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ - ravb_stop_dma(ndev); + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. So, don't call the following functions, + * because ravb_dmac_init() could fail too. + * Also, don't keep retrying ravb_stop_dma() here, because + * that could wait forever. So, just re-enable TX and RX + * and skip the following + * re-initialization procedure. + */ + ravb_rcv_snd_enable(ndev); + goto out; + } ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); /* Device init */ - ravb_dmac_init(ndev); + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). + */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return; + } ravb_emac_init(ndev); +out: /* Initialise PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 90410f9d3b1aaec270b358d5c2c9bdf259e62f62..1c4fea9c3ec4c88c3235f55fdbba73b430b8575f 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio, "power", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Optional reset GPIO configured?
Minimum 100 ns reset needed @@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio, "reset", 0, 0, 100); if (ret) - return ret; + goto out_free_netdev; /* * Need to wait for optional EEPROM to load, max 750 us according diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f2638446b62e69a9dcc8b5bd9b4e8f4cff6f0d6a..81b554dd7221bc3038393940b7e87385e0f8c4b9 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev) ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, priv->pinmode_mask, priv->pinmode_val); if (ret) - return ret; + goto out_reset_assert; ave_global_reset(ndev); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 1492648247d96ce7966a93bc5a4813bbb625517a..6d778bc3d012f3b223f862a40b20bff6671a4677 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1850,7 +1850,8 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; port->ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM; + NETIF_F_HW_CSUM | + NETIF_F_HW_TC; port->ndev->features = port->ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; port->ndev->vlan_features |= NETIF_F_SG; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 4661ef865807f0d7fb603914cb9af325df7b7b6b..dec52b763d5080731e219b4ea8f375e17d0703bf 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1615,11 +1615,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct geneve_dev *geneve = netdev_priv(dev); + enum ifla_geneve_df df = geneve->df; struct geneve_sock *gs4, *gs6; struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; - enum ifla_geneve_df df; bool ttl_inherit; int err; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 2a6ec53949666190e81a8f3df02d177a149240f6..a4b3fce69ecd96ec9e40274288b62e8b58320299 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -1242,7 +1242,7 @@ static int rr_open(struct net_device *dev) rrpriv->info = NULL; } if (rrpriv->rx_ctrl) { - pci_free_consistent(pdev, sizeof(struct ring_ctrl), + pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl), rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); rrpriv->rx_ctrl = NULL; } diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 5a37514e423477cee4a4de4ccda29937a25e688b..c11f32f644db3ce27a81d41c27593f5d3821d7e7 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -4,7 +4,7 @@ * * Copyright 2009-2017 Analog Devices Inc. 
* - * http://www.analog.com/ADF7242 + * https://www.analog.com/ADF7242 */ #include @@ -1262,7 +1262,7 @@ static int adf7242_probe(struct spi_device *spi) WQ_MEM_RECLAIM); if (unlikely(!lp->wqueue)) { ret = -ENOMEM; - goto err_hw_init; + goto err_alloc_wq; } ret = adf7242_hw_init(lp); @@ -1294,6 +1294,8 @@ static int adf7242_probe(struct spi_device *spi) return ret; err_hw_init: + destroy_workqueue(lp->wqueue); +err_alloc_wq: mutex_destroy(&lp->bmux); ieee802154_free_hw(lp->hw); diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 55226b264e3c45391d1b7b83b1e06d7ac4a37a27..ac7e5a04c8ac9f4857c9a306c2da0f142967ad27 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -500,6 +500,13 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) int ret; state = gsi_channel_state(channel); + + /* Channel could have entered STOPPED state since last call + * if it timed out. If so, we're done. + */ + if (state == GSI_CHANNEL_STATE_STOPPED) + return 0; + if (state != GSI_CHANNEL_STATE_STARTED && state != GSI_CHANNEL_STATE_STOP_IN_PROC) return -EINVAL; @@ -789,20 +796,11 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id) int gsi_channel_stop(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; - enum gsi_channel_state state; u32 retries; int ret; gsi_channel_freeze(channel); - /* Channel could have entered STOPPED state since last call if the - * STOP command timed out. We won't stop a channel if stopping it - * was successful previously (so we still want the freeze above). - */ - state = gsi_channel_state(channel); - if (state == GSI_CHANNEL_STATE_STOPPED) - return 0; - /* RX channels might require a little time to enter STOPPED state */ retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES; diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index c9ab865e729062cfbd34228d81dd6cd3e8f4566d..d92dd3f09b735ad0545fc544f5333f058f7629fe 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -586,6 +586,21 @@ u32 ipa_cmd_tag_process_count(void) return 4; } +void ipa_cmd_tag_process(struct ipa *ipa) +{ + u32 count = ipa_cmd_tag_process_count(); + struct gsi_trans *trans; + + trans = ipa_cmd_trans_alloc(ipa, count); + if (trans) { + ipa_cmd_tag_process_add(trans); + gsi_trans_commit_wait(trans); + } else { + dev_err(&ipa->pdev->dev, + "error allocating %u entry tag transaction\n", count); + } +} + static struct ipa_cmd_info * ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) { diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index e440aa69c8b5bc20e0cf340c9ea4b6890bce7111..1a646e0264a05ca985bfebd929c459b677c0bfc8 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -171,6 +171,14 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans); */ u32 ipa_cmd_tag_process_count(void); +/** + * ipa_cmd_tag_process() - Perform a tag process + * @ipa: IPA pointer + * + * Commit a transaction containing tag process commands and wait for completion. + */ +void ipa_cmd_tag_process(struct ipa *ipa); + /** * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint * @ipa: IPA pointer diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c index 52d4b84e0dac6568afff2c1a8677c879ab8936ce..de2768d71ab56b6dc38ad6c0a1549b9620fc1abb 100644 --- a/drivers/net/ipa/ipa_data-sdm845.c +++ b/drivers/net/ipa/ipa_data-sdm845.c @@ -44,7 +44,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = {
.seq_type = IPA_SEQ_INVALID, .config = { - .checksum = true, .aggregation = true, .status_enable = true, .rx = { diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 9f50d0d11704c075a03c0d5512b2b1074852a30e..9e58e495d3731c1bc274d2e8e1b1e5ffccdebcc7 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1450,6 +1450,8 @@ void ipa_endpoint_suspend(struct ipa *ipa) if (ipa->modem_netdev) ipa_modem_suspend(ipa->modem_netdev); + ipa_cmd_tag_process(ipa); + ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); } diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c index dc4a5c2196aeeb8b431ab3840db67afd5292722c..d323adb03383f6e9b17dc64c4e7657f960ede86b 100644 --- a/drivers/net/ipa/ipa_gsi.c +++ b/drivers/net/ipa/ipa_gsi.c @@ -6,6 +6,7 @@ #include +#include "ipa_gsi.h" #include "gsi_trans.h" #include "ipa.h" #include "ipa_endpoint.h" diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h index 3cf18600c68e8d676bbbe29930c8d2e6f77c5519..0a40f3dc55fca4736cc5c50feb6fafd1d29e4d04 100644 --- a/drivers/net/ipa/ipa_gsi.h +++ b/drivers/net/ipa/ipa_gsi.h @@ -8,7 +8,9 @@ #include +struct gsi; struct gsi_trans; +struct ipa_gsi_endpoint_data; /** * ipa_gsi_trans_complete() - GSI transaction completion callback diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c index 03a1d0e559644bb57654b9e9ee49596dcdb9b99c..73413371e3d3eaaacc516aca860ea9592d74998b 100644 --- a/drivers/net/ipa/ipa_qmi_msg.c +++ b/drivers/net/ipa/ipa_qmi_msg.c @@ -119,7 +119,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = { sizeof_field(struct ipa_driver_init_complete_rsp, rsp), .tlv_type = 0x02, - .elem_size = offsetof(struct ipa_driver_init_complete_rsp, + .offset = offsetof(struct ipa_driver_init_complete_rsp, rsp), .ei_array = qmi_response_type_v01_ei, }, @@ -137,7 +137,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = { sizeof_field(struct ipa_init_complete_ind, status), .tlv_type = 0x02, - .elem_size = offsetof(struct ipa_init_complete_ind, + .offset = offsetof(struct ipa_init_complete_ind, status), .ei_array = qmi_response_type_v01_ei, }, @@ -218,7 +218,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = { sizeof_field(struct ipa_init_modem_driver_req, platform_type_valid), .tlv_type = 0x10, - .elem_size = offsetof(struct ipa_init_modem_driver_req, + .offset = offsetof(struct ipa_init_modem_driver_req, platform_type_valid), }, { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index e56547bfdac9a909a159bfdb5aa67014d159476b..9159846b8b9388644bcf8f136231726d8cf297f2 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, return err; netdev_lockdep_set_classes(dev); - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macsec_netdev_addr_lock_key, - dev->lower_level); + lockdep_set_class(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key); err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6a6cc9f75307566ec108e26aad9599633d7b6577..4942f6112e51f838057974c2e386ba593f939cba 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; static void macvlan_set_lockdep_class(struct net_device *dev) { netdev_lockdep_set_classes(dev); - 
lockdep_set_class_and_subclass(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key, - dev->lower_level); + lockdep_set_class(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key); } static int macvlan_init(struct net_device *dev) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 2908e0a0d6e19aab337634e43b03f0296d0102aa..23950e7a0f81ef11365dbbb6891052d82be6d65c 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -302,7 +302,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) rtnl_lock(); err = nsim_bpf_init(ns); if (err) - goto err_free_netdev; + goto err_rtnl_unlock; nsim_ipsec_init(ns); @@ -316,8 +316,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) err_ipsec_teardown: nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); +err_rtnl_unlock: rtnl_unlock(); -err_free_netdev: free_netdev(dev); return ERR_PTR(err); } diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index ecbd5e0d685cf3cc3b3d46fbb2f1456c44ed0472..acb0aae607558f76f147ad4b4c2b9bb2eb9269a9 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1260,6 +1260,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V1; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -1267,6 +1268,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: @@ -1274,6 +1276,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: @@ -1281,6 +1284,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) dp83640->hwts_rx_en = 1; dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2; dp83640->version = PTP_CLASS_V2; + cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 858b012074bd1e733ccaf6255dd733a6eef432b1..7adeb91bd368dceab1357c4ec007572a7784a091 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -1351,6 +1352,7 @@ static void tun_net_init(struct net_device *dev) switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: dev->netdev_ops = &tun_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 4e514f5d7c6c7c2a2af2cbb3e214c902bc8c7302..fd3a04d98dc14b4a2786a23bd40f2f4b3845dcbe 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -187,6 +187,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); + ret = -EIO; goto free; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); diff --git a/drivers/net/usb/hso.c 
b/drivers/net/usb/hso.c index bb8c34d746ab33a8cd187baea128d9525e9a3c9f..d2fdb5430d27265a409fe4d4779c99e7f4864840 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1390,8 +1390,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) unsigned long flags; if (old) - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", - tty->termios.c_cflag, old->c_cflag); + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", + (unsigned int)tty->termios.c_cflag, + (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); @@ -2260,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, minor = get_free_serial_index(); if (minor < 0) - goto exit; + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) + goto exit2; /* fill in specific data for later use */ serial->minor = minor; @@ -2310,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index eccbf4cd71496b07da41562ea72dbfe1b127ee93..442507f25aadb7dca5bd84787a8d85a1a1b0822e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -377,10 +377,6 @@ struct lan78xx_net { struct tasklet_struct bh; struct delayed_work wq; - struct usb_host_endpoint *ep_blkin; - struct usb_host_endpoint *ep_blkout; - struct usb_host_endpoint *ep_intr; - int msg_enable; struct urb *urb_intr; @@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_OK; } -static int -lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) -{ - int tmp; - struct usb_host_interface *alt = NULL; - struct usb_host_endpoint *in = NULL, *out = NULL; - struct usb_host_endpoint *status = NULL; - - for (tmp = 0; tmp < intf->num_altsetting; tmp++) { - unsigned ep; - - in = NULL; - out = NULL; - status = NULL; - alt = intf->altsetting + tmp; - - for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { - struct usb_host_endpoint *e; - int intr = 0; - - e = alt->endpoint + ep; - switch (e->desc.bmAttributes) { - case USB_ENDPOINT_XFER_INT: - if (!usb_endpoint_dir_in(&e->desc)) - continue; - intr = 1; - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_BULK: - break; - default: - continue; - } - if (usb_endpoint_dir_in(&e->desc)) { - if (!intr && !in) - in = e; - else if (intr && !status) - status = e; - } else { - if (!out) - out = e; - } - } - if (in && out) - break; - } - if (!alt || !in || !out) - return -EINVAL; - - dev->pipe_in = usb_rcvbulkpipe(dev->udev, - in->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->pipe_out = usb_sndbulkpipe(dev->udev, - out->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->ep_intr = status; - - return 0; -} - static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = NULL; int ret; int i; - ret = lan78xx_get_endpoints(dev, intf); - if (ret) { - netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", - ret); - return ret; - } - dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct 
timer_list *t) static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; @@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf, mutex_init(&dev->stats.access_lock); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); + if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); + if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { + ret = -ENODEV; + goto out2; + } + + ep_intr = &intf->cur_altsetting->endpoint[2]; + if (!usb_endpoint_is_int_in(&ep_intr->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + usb_endpoint_num(&ep_intr->desc)); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); - dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; - dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; - dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; - - dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); - dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); - - dev->pipe_intr = usb_rcvintpipe(dev->udev, - dev->ep_intr->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - period = dev->ep_intr->desc.bInterval; - + period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); if (buf) { @@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf, usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 31b1d4b959f68807d2e1d2505a46b9a58f953f54..07c42c0719f5b1bf472d2a4bc5471ca180e5543d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 3cf4dc3433f91e6566a26b7cf811b7861cbd7d6a..bb4ccbda031abb7e261ddf90ef16f8c679d5cfd6 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) /* Init all registers */ ret = smsc95xx_reset(dev); + if (ret) + goto free_pdata; /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); if (ret < 0) - return ret; + goto free_pdata; + val >>= 16; pdata->chip_id = val; pdata->mdix_ctrl = get_mdix_status(dev->net); @@ -1317,6 +1320,10 @@ static 
int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); return 0; + +free_pdata: + kfree(pdata); + return ret; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 89d85dcb200e88548dd563e9e98e1ac1bfdcf8c6..a7c3939264b005a60d4b6aeef33b617d0cf305e1 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, NULL); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip_nh: *idx += 1; continue; @@ -1403,12 +1406,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; @@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP))) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ - if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f, true, true); + if (is_zero_ether_addr(f->eth_addr) && + f->vni == vxlan->cfg.vni) + continue; + vxlan_fdb_destroy(vxlan, f, true, true); } spin_unlock_bh(&vxlan->hash_lock[h]); } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index c84536b03aa84e5cce1637238ce065512dc7b390..f70336bb6f5244e3064a20a28f06dec5a0f9103f 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -71,8 +71,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } skb_push(skb, 1); skb_reset_network_header(skb); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index e30d91a38cfb637b376eab0e963ab0ade09fbbb5..b2868433718f61dbf70cfab88576dd73a9fce54e 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb) { unsigned char *ptr; - skb_push(skb, 1); - - if (skb_cow(skb, 1)) + if (skb_cow(skb, 1)) { + kfree_skb(skb); return NET_RX_DROP; + } + + skb_push(skb, 1); ptr = skb->data; *ptr = X25_IFACE_DATA; @@ -303,7 +305,6 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; - dev->hard_header_len = 3; dev->mtu = 1000; dev->addr_len = 0; } @@ -324,6 +325,14 @@ static int lapbeth_new_device(struct net_device *dev) if (!ndev) goto out; + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes, + * then this driver prepends a length field of 2 bytes, + * then the underlying Ethernet device prepends its own header. 
+ */ + ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; + lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 69773d228ec175e03fc4f555f148786d5bb42d75..84640a0c13f3590d456ab16de203d3a02dfc13b5 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl) netif_wake_queue(sl->dev); } -/* Send one completely decapsulated IP datagram to the IP layer. */ +/* Send an LAPB frame to the LAPB module to process. */ static void x25_asy_bump(struct x25_asy *sl) { @@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl) count = sl->rcount; dev->stats.rx_bytes += count; - skb = dev_alloc_skb(count+1); + skb = dev_alloc_skb(count); if (skb == NULL) { netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); dev->stats.rx_dropped++; return; } - skb_push(skb, 1); /* LAPB internal control */ skb_put_data(skb, sl->rbuff, count); skb->protocol = x25_type_trans(skb, sl->dev); err = lapb_data_received(skb->dev, skb); @@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl) kfree_skb(skb); printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); } else { - netif_rx(skb); dev->stats.rx_packets++; } } @@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, */ /* - * Called when I frame data arrives. We did the work above - throw it - * at the net layer. + * Called when I frame data arrive. We add a pseudo header for upper + * layers and pass it to upper layers. */ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) { + if (skb_cow(skb, 1)) { + kfree_skb(skb); + return NET_RX_DROP; + } + skb_push(skb, 1); + skb->data[0] = X25_IFACE_DATA; + + skb->protocol = x25_type_trans(skb, dev); + return netif_rx(skb); } @@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) switch (s) { case X25_END: if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 2) + sl->rcount >= 2) x25_asy_bump(sl); clear_bit(SLF_ESCAPE, &sl->flags); sl->rcount = 0; diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index a8f151b1b5fab52d061efdb3e49975076c93d3ac..c9f65e96ccb04f12b7ce788b1a122458f9663afc 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -262,6 +262,7 @@ static void wg_setup(struct net_device *dev) max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); dev->netdev_ops = &netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->hard_header_len = 0; dev->addr_len = 0; dev->needed_headroom = DATA_PACKET_HEAD_ROOM; diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h index c58df439dbbe09bf50656707a7d6cd86171886a7..dfb674e030764ac83fc53d8aab0af6b7a04e8ad9 100644 --- a/drivers/net/wireguard/queueing.h +++ b/drivers/net/wireguard/queueing.h @@ -11,6 +11,7 @@ #include #include #include +#include struct wg_device; struct wg_peer; @@ -65,25 +66,9 @@ struct packet_cb { #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) -/* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ -static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) -{ - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct iphdr)) <= - skb_tail_pointer(skb) && - ip_hdr(skb)->version == 4) - return htons(ETH_P_IP); - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= - skb_tail_pointer(skb) && - ipv6_hdr(skb)->version == 6) - return htons(ETH_P_IPV6); - return 0; -} - static inline bool wg_check_packet_protocol(struct sk_buff *skb) { - __be16 real_protocol = wg_examine_packet_protocol(skb); + __be16 real_protocol = ip_tunnel_parse_protocol(skb); return real_protocol && skb->protocol == real_protocol; } diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c index 9b2ab6fc91cdd9bb1cb60e235cab33be5438dd6f..2c9551ea6dc739432d210d2517c6c719fc627bf7 100644 --- a/drivers/net/wireguard/receive.c +++ b/drivers/net/wireguard/receive.c @@ -387,7 +387,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer, */ skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ~0; /* All levels */ - skb->protocol = wg_examine_packet_protocol(skb); + skb->protocol = ip_tunnel_parse_protocol(skb); if (skb->protocol == htons(ETH_P_IP)) { len = ntohs(ip_hdr(skb)->tot_len); if (unlikely(len < sizeof(struct iphdr))) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 342a7e58018acd4393e829137e7e882d7da8ddfd..05a61975c83f4bf26c04c8510f64d6dd5225769d 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -820,7 +820,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev) ath10k_ahb_release_irq_legacy(ar); err_free_pipes: - ath10k_pci_free_pipes(ar); + ath10k_pci_release_resource(ar); err_resource_deinit: ath10k_ahb_resource_deinit(ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 1d941d53fdc9ff2300aaceb8264afb793de88f9e..cfde7791291ab388061f3cd69ebcdad8a97a2554 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3473,6 +3473,28 @@ int ath10k_pci_setup_resource(struct ath10k *ar) timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); + ar_pci->attr = kmemdup(pci_host_ce_config_wlan, + sizeof(pci_host_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->attr) + return -ENOMEM; + + ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, + sizeof(pci_target_ce_config_wlan), + GFP_KERNEL); + if (!ar_pci->pipe_config) { + ret = -ENOMEM; + goto err_free_attr; + } + + ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, + sizeof(pci_target_service_to_ce_map_wlan), + GFP_KERNEL); + if (!ar_pci->serv_to_pipe) { + ret = -ENOMEM; + goto err_free_pipe_config; + } + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) ath10k_pci_override_ce_config(ar); @@ -3480,18 +3502,31 @@ int ath10k_pci_setup_resource(struct ath10k *ar) if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); - return ret; + goto err_free_serv_to_pipe; } return 0; + +err_free_serv_to_pipe: + kfree(ar_pci->serv_to_pipe); +err_free_pipe_config: + kfree(ar_pci->pipe_config); +err_free_attr: + kfree(ar_pci->attr); + return ret; } void ath10k_pci_release_resource(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + ath10k_pci_rx_retry_sync(ar); netif_napi_del(&ar->napi); ath10k_pci_ce_deinit(ar); ath10k_pci_free_pipes(ar); + kfree(ar_pci->attr); + kfree(ar_pci->pipe_config); + 
kfree(ar_pci->serv_to_pipe); } static const struct ath10k_bus_ops ath10k_pci_bus_ops = { @@ -3601,30 +3636,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); - ar_pci->attr = kmemdup(pci_host_ce_config_wlan, - sizeof(pci_host_ce_config_wlan), - GFP_KERNEL); - if (!ar_pci->attr) { - ret = -ENOMEM; - goto err_free; - } - - ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan, - sizeof(pci_target_ce_config_wlan), - GFP_KERNEL); - if (!ar_pci->pipe_config) { - ret = -ENOMEM; - goto err_free; - } - - ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan, - sizeof(pci_target_service_to_ce_map_wlan), - GFP_KERNEL); - if (!ar_pci->serv_to_pipe) { - ret = -ENOMEM; - goto err_free; - } - ret = ath10k_pci_setup_resource(ar); if (ret) { ath10k_err(ar, "failed to setup resource: %d\n", ret); @@ -3705,10 +3716,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev, err_free_irq: ath10k_pci_free_irq(ar); - ath10k_pci_rx_retry_sync(ar); err_deinit_irq: - ath10k_pci_deinit_irq(ar); + ath10k_pci_release_resource(ar); err_sleep: ath10k_pci_sleep_sync(ar); @@ -3720,29 +3730,18 @@ static int ath10k_pci_probe(struct pci_dev *pdev, err_core_destroy: ath10k_core_destroy(ar); -err_free: - kfree(ar_pci->attr); - kfree(ar_pci->pipe_config); - kfree(ar_pci->serv_to_pipe); - return ret; } static void ath10k_pci_remove(struct pci_dev *pdev) { struct ath10k *ar = pci_get_drvdata(pdev); - struct ath10k_pci *ar_pci; ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); if (!ar) return; - ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci) - return; - ath10k_core_unregister(ar); ath10k_pci_free_irq(ar); ath10k_pci_deinit_irq(ar); @@ -3750,9 +3749,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); - kfree(ar_pci->attr); - kfree(ar_pci->pipe_config); - kfree(ar_pci->serv_to_pipe); } MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 4ed21dad6a8e16040d177c3ff9ef7c7e09035b3f..3f563e02d17daa315d6241477142e42f190fcefa 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -733,11 +733,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } + rx_buf->skb = nskb; + usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb, 1); + ath9k_hif_usb_reg_in_cb, rx_buf, 1); } resubmit: diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 7987a288917bac0aab3ccb3164fdb7634255e473..27116c7d3f4f8623d90a4ea72779a880ed507516 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -271,6 +271,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, { struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data; u32 tp = le32_to_cpu(trig->time_point); + struct iwl_ucode_tlv *dup = NULL; + int ret; if (le32_to_cpu(tlv->length) < sizeof(*trig)) return -EINVAL; @@ -283,10 +285,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans, return -EINVAL; } - if (!le32_to_cpu(trig->occurrences)) + if (!le32_to_cpu(trig->occurrences)) { + dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length), + GFP_KERNEL); + if (!dup) + return -ENOMEM; + trig = (void *)dup->data; trig->occurrences = cpu_to_le32(-1); + tlv = dup; + } + + 
ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); + kfree(dup); - return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list); + return ret; } static int (*dbg_tlv_alloc[])(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index fee01cbbd3ace83d21d8a3f95a1ce1b79a2598a4..27977992fd7fa2c1bcf10c7eb783f0a2ef5fba88 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1189,17 +1189,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES) iwl_mvm_change_queue_tid(mvm, i); + rcu_read_unlock(); + if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); - if (ret) { - rcu_read_unlock(); + if (ret) return ret; - } } - rcu_read_unlock(); - return free_queue; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 65d65c6baf4c344e463d781b6d3a099caddc80f7..e02bafb8921f6ad16506f6bf21f334c1aa9f870b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -582,6 +582,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), + IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name), + IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index dfe625a53c6319cf04b362baeaae88fcf792c335..3d7db6ffb599872231a458253b1654034fb22bf9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -301,6 +301,7 @@ struct mt76_hw_cap { #define MT_DRV_TX_ALIGNED4_SKBS BIT(1) #define MT_DRV_SW_RX_AIRTIME BIT(2) #define MT_DRV_RX_DMA_HDR BIT(3) +#define MT_DRV_HW_MGMT_TXQ BIT(4) struct mt76_driver_ops { u32 drv_flags; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 26cb711b465f38b6f078b52202ba40fca8a55017..83dfa6da4761edb281bdf9d405009997bb057824 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -642,8 +642,10 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7603_dev *dev = hw->priv; + mutex_lock(&dev->mt76.mutex); dev->coverage_class = max_t(s16, coverage_class, 0); mt7603_mac_set_timing(dev); + mutex_unlock(&dev->mt76.mutex); } static void mt7603_tx(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index fd3ef483a87ce75dbed77a706f05fdbd675e8731..d06afcf46d67ccde47bd0466b053891c3b6f7731 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -234,10 +234,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) int i; for (i = 0; i < 16; i++) { - int j, acs = i / 4, index = i % 4; + int j, wmm_idx = i % MT7615_MAX_WMM_SETS; + int acs = i / MT7615_MAX_WMM_SETS; u32 ctrl, 
val, qlen = 0; - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx)); ctrl = BIT(31) | BIT(15) | (acs << 8); for (j = 0; j < 32; j++) { @@ -245,11 +246,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) continue; mt76_wr(dev, MT_PLE_FL_Q0_CTRL, - ctrl | (j + (index << 5))); + ctrl | (j + (wmm_idx << 5))); qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, GENMASK(11, 0)); } - seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen); + seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); } return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 5a124610d4af02d5bd5084ccee5c2c2f59bd473a..e5a965df899a174d7219a49522996627102ee4f9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -36,10 +36,10 @@ static int mt7622_init_tx_queues_multi(struct mt7615_dev *dev) { static const u8 wmm_queue_map[] = { - MT7622_TXQ_AC0, - MT7622_TXQ_AC1, - MT7622_TXQ_AC2, - MT7622_TXQ_AC3, + [IEEE80211_AC_BK] = MT7622_TXQ_AC0, + [IEEE80211_AC_BE] = MT7622_TXQ_AC1, + [IEEE80211_AC_VI] = MT7622_TXQ_AC2, + [IEEE80211_AC_VO] = MT7622_TXQ_AC3, }; int ret; int i; @@ -100,6 +100,7 @@ mt7615_tx_cleanup(struct mt7615_dev *dev) int i; mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); if (is_mt7615(&dev->mt76)) { mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); } else { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index edac37e7847bad3a325ab6545386366e34b669eb..22e4eabe6578d6363de9de9e3195815bb7cefae7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -72,8 +72,7 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr) { int ret; - ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE + - MT7615_EEPROM_EXTRA_DATA); + ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE); if (ret < 0) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h index 40fed7adc58a929b510467482d8fccce41064b98..a024dee103620d9f886fab4a1be17381376b05d7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h @@ -17,7 +17,7 @@ #define MT7615_EEPROM_TXDPD_SIZE 216 #define MT7615_EEPROM_TXDPD_COUNT (44 + 3) -#define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \ +#define MT7615_EEPROM_FULL_SIZE (MT7615_EEPROM_TXDPD_OFFSET + \ MT7615_EEPROM_TXDPD_COUNT * \ MT7615_EEPROM_TXDPD_SIZE) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 9f1c6ca7a665bcce42da6c1bb885e9c9456f7ac2..d97315ec72658ee3d74c2612bf38f5e8781aeba9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -526,22 +526,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; - if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { - q_idx = wmm_idx * MT7615_MAX_WMM_SETS + - skb_get_queue_mapping(skb); - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; - } else if (beacon) { - if (ext_phy) - q_idx = MT_LMAC_BCN1; - else - q_idx = MT_LMAC_BCN0; + if (beacon) { p_fmt = MT_TX_TYPE_FW; + q_idx = ext_phy ? 
MT_LMAC_BCN1 : MT_LMAC_BCN0; + } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { + p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; } else { - if (ext_phy) - q_idx = MT_LMAC_ALTX1; - else - q_idx = MT_LMAC_ALTX0; p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + q_idx = wmm_idx * MT7615_MAX_WMM_SETS + + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); } val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index f0d4b29a52a240b1516be57f1b88855d10313ca8..81608ab656b8ef7d42ed755f906234e699ef027e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -124,21 +124,6 @@ enum tx_pkt_type { MT_TX_TYPE_FW, }; -enum tx_pkt_queue_idx { - MT_LMAC_AC00, - MT_LMAC_AC01, - MT_LMAC_AC02, - MT_LMAC_AC03, - MT_LMAC_ALTX0 = 0x10, - MT_LMAC_BMC0, - MT_LMAC_BCN0, - MT_LMAC_PSMP0, - MT_LMAC_ALTX1, - MT_LMAC_BMC1, - MT_LMAC_BCN1, - MT_LMAC_PSMP1, -}; - enum tx_port_idx { MT_TX_PORT_IDX_LMAC, MT_TX_PORT_IDX_MCU diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index c26f99b368d934b974780db87e8fa8aaf8a98865..beaca8127680827f81c4ee6f9179f30c088013c0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -397,6 +397,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); + queue = mt7615_lmac_mapping(dev, queue); queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; return mt7615_mcu_set_wmm(dev, queue, params); @@ -735,9 +736,12 @@ static void mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7615_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index e670393506f0b4dbe6a66646519e30f4db3e07b0..2e99845b9c96c67fe4110a14485f0aaa8e066cbc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -146,7 +146,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, static const struct mt76_driver_ops drv_ops = { /* txwi_size = txd size + txp size */ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common), - .drv_flags = MT_DRV_TXWI_NO_FREE, + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index d6176d316bee26674d8f9d18ec265679167da161..3e7d51bf42a4d32f9796610f79e5fee68c8c3b8d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -282,6 +282,21 @@ struct mt7615_dev { struct list_head wd_head; }; +enum tx_pkt_queue_idx { + MT_LMAC_AC00, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, + MT_LMAC_PSMP0, + MT_LMAC_ALTX1, + MT_LMAC_BMC1, + MT_LMAC_BCN1, + MT_LMAC_PSMP1, +}; + enum { HW_BSSID_0 = 0x0, HW_BSSID_1, @@ -447,6 
+462,21 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev) return MT7615_WTBL_SIZE; } +static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) +{ + static const u8 lmac_queue_map[] = { + [IEEE80211_AC_BK] = MT_LMAC_AC00, + [IEEE80211_AC_BE] = MT_LMAC_AC01, + [IEEE80211_AC_VI] = MT_LMAC_AC02, + [IEEE80211_AC_VO] = MT_LMAC_AC03, + }; + + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) + return MT_LMAC_AC01; /* BE */ + + return lmac_queue_map[ac]; +} + void mt7615_dma_reset(struct mt7615_dev *dev); void mt7615_scan_work(struct work_struct *work); void mt7615_roc_work(struct work_struct *work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index a50077eb24d75f2b10635fdc930c42bef970e2e1..5be6704770ad0ca313208f720595ad6d79d83a43 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -270,7 +270,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf, { static const struct mt76_driver_ops drv_ops = { .txwi_size = MT_USB_TXD_SIZE, - .drv_flags = MT_DRV_RX_DMA_HDR, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, .tx_prepare_skb = mt7663u_tx_prepare_skb, .tx_complete_skb = mt7663u_tx_complete_skb, .tx_status_data = mt7663u_tx_status_data, @@ -329,25 +329,26 @@ static int mt7663u_probe(struct usb_interface *usb_intf, if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON, FW_STATE_PWR_ON << 1, 500)) { dev_err(dev->mt76.dev, "Timeout for power on\n"); - return -EIO; + ret = -EIO; + goto error; } alloc_queues: ret = mt76u_alloc_mcu_queue(&dev->mt76); if (ret) - goto error; + goto error_free_q; ret = mt76u_alloc_queues(&dev->mt76); if (ret) - goto error; + goto error_free_q; ret = mt7663u_register_device(dev); if (ret) - goto error_freeq; + goto error_free_q; return 0; -error_freeq: +error_free_q: mt76u_queues_deinit(&dev->mt76); error: mt76u_deinit(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index cbbe986655fe50c414f9219418e9ea217a5babed..5fda6e7b120c20c83c06dcd1e21af4cb2d944946 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -456,8 +456,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) tasklet_disable(&dev->mt76.tx_tasklet); napi_disable(&dev->mt76.tx_napi); - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) + mt76_for_each_q_rx(&dev->mt76, i) { napi_disable(&dev->mt76.napi[i]); + } mutex_lock(&dev->mt76.mutex); @@ -515,7 +516,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) tasklet_enable(&dev->mt76.pre_tbtt_tasklet); - for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) { + mt76_for_each_q_rx(&dev->mt76, i) { napi_enable(&dev->mt76.napi[i]); napi_schedule(&dev->mt76.napi[i]); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 0575c259f2459ff6ee01ffa6e094306745374611..05b5650c56c82ddd2bb4ea6245a0852cbfb21b27 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -716,9 +716,12 @@ static void mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7915_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int diff --git 
a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index fca38ea2441f79d86504e2e410e646c1148ece40..f10c98aa883c0ebdf92b7216bd22240f11eb5cea 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -264,6 +264,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, skb_set_queue_mapping(skb, qid); } + if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && + !ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { + qid = MT_TXQ_PSD; + skb_set_queue_mapping(skb, qid); + } + if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index fb97ea25b4d436a88ca309d25757d8a96dc10b98..87382b2f74433d9b30da36d8ea8a04db860afe72 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1010,17 +1010,18 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac) { if (mt76_chip(dev) == 0x7663) { - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_VO] = 0, - [IEEE80211_AC_VI] = 1, - [IEEE80211_AC_BE] = 2, - [IEEE80211_AC_BK] = 4, + static const u8 lmac_queue_map[] = { + /* ac to lmac mapping */ + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 4, }; - if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map))) - return 2; /* BE */ + if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map))) + return 1; /* BE */ - return wmm_queue_map[ac]; + return lmac_queue_map[ac]; } return mt76_ac_to_hwq(ac); @@ -1066,11 +1067,16 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) static void mt76u_free_tx(struct mt76_dev *dev) { - struct mt76_queue *q; - int i, j; + int i; for (i = 0; i < IEEE80211_NUM_ACS; i++) { + struct mt76_queue *q; + int j; + q = dev->q_tx[i].q; + if (!q) + continue; + for (j = 0; j < q->ndesc; j++) usb_free_urb(q->entry[j].urb); } @@ -1078,17 +1084,22 @@ static void mt76u_free_tx(struct mt76_dev *dev) void mt76u_stop_tx(struct mt76_dev *dev) { - struct mt76_queue_entry entry; - struct mt76_queue *q; - int i, j, ret; + int ret; ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), HZ / 5); if (!ret) { + struct mt76_queue_entry entry; + struct mt76_queue *q; + int i, j; + dev_err(dev->dev, "timed out waiting for pending tx\n"); for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = dev->q_tx[i].q; + if (!q) + continue; + for (j = 0; j < q->ndesc; j++) usb_kill_urb(q->entry[j].urb); } @@ -1100,6 +1111,8 @@ void mt76u_stop_tx(struct mt76_dev *dev) */ for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = dev->q_tx[i].q; + if (!q) + continue; /* Assure we are in sync with killed tasklet. 
*/ spin_lock_bh(&q->lock); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 482c6c8b0fb7e0fb2476d52b41156b45903a46fe..88280057e032175c125f2073c89cc8e940ff97fd 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ -static int xennet_remove(struct xenbus_device *dev) +static void xennet_bus_close(struct xenbus_device *dev) { - struct netfront_info *info = dev_get_drvdata(&dev->dev); - - dev_dbg(&dev->dev, "%s\n", dev->nodename); + int ret; - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + +static int xennet_remove(struct xenbus_device *dev) +{ + struct netfront_info *info = dev_get_drvdata(&dev->dev); + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 91d4d5b28a7d9e41dd1b45976f62c9eb2569a249..ba6c486d64659720431977b28e6d44a43659ca5e 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, case S3FWRN5_MODE_FW: return s3fwrn5_fw_recv_frame(ndev, skb); default: + kfree_skb(skb); return -ENODEV; } } diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 89b85970912dbe076ae37cbc2ce56bce570f0457..4cef69bd3c1bd8638c8181ca632d158712a091c0 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -95,7 +95,7 @@ static struct 
key *nvdimm_lookup_user_key(struct nvdimm *nvdimm, struct encrypted_key_payload *epayload; struct device *dev = &nvdimm->dev; - keyref = lookup_user_key(id, 0, 0); + keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(keyref)) return NULL; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 8410d03b940d7454e32df216e3e27f904c9c28e1..4ee2330c603e77265388981dc7a38c3f73309439 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1102,6 +1102,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, int pos; int len; + if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) + return 0; + c.identify.opcode = nvme_admin_identify; c.identify.nsid = cpu_to_le32(nsid); c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; @@ -1115,18 +1118,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, if (status) { dev_warn(ctrl->device, "Identify Descriptors failed (%d)\n", status); - /* - * Don't treat non-retryable errors as fatal, as we potentially - * already have a NGUID or EUI-64. If we failed with DNR set, - * we want to silently ignore the error as we can still - * identify the device, but if the status has DNR set, we want - * to propagate the error back specifically for the disk - * revalidation flow to make sure we don't abandon the - * device just because of a temporal retry-able error (such - * as path of transport errors). - */ - if (status > 0 && (status & NVME_SC_DNR)) - status = 0; goto free_data; } @@ -1980,6 +1971,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); + nvme_mpath_update_disk_size(ns->head->disk); } #endif return 0; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 2ef8d501e2a87c3c94e5965a61df34aae22fee0b..09ffc3246f60e410f52163528d4b7a516db37e72 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -129,6 +129,13 @@ enum nvme_quirks { * Don't change the value of the temperature threshold feature */ NVME_QUIRK_NO_TEMP_THRESH_CHANGE = (1 << 14), + + /* + * The controller doesn't handle the Identify Namespace + * Identification Descriptor list subcommand despite claiming + * NVMe 1.3 compliance. 
+ */ + NVME_QUIRK_NO_NS_DESC_LIST = (1 << 15), }; /* @@ -604,6 +611,16 @@ static inline void nvme_trace_bio_complete(struct request *req, trace_block_bio_complete(ns->head->disk->queue, req->bio); } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ + struct block_device *bdev = bdget_disk(disk, 0); + + if (bdev) { + bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); + bdput(bdev); + } +} + extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; extern struct device_attribute subsys_attr_iopolicy; @@ -679,6 +696,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) { } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b1d18f0633c7516ada1c3acc1b78a9ee89e9e801..d4b1ff7471231bb023824947d892d393013f6ed4 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3099,6 +3099,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_IDENTIFY_CNS | NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ @@ -3122,6 +3124,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 79ef2b8e2b3ca738c7f0d81eb70c1cf5a4150245..f3a91818167b1b2c8f942cfffed1217d1f7ba517 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1382,6 +1382,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, if (nctrl->opts->tos >= 0) ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); + /* Set 10 seconds timeout for icresp recvmsg */ + queue->sock->sk->sk_rcvtimeo = 10 * HZ; + queue->sock->sk->sk_allocation = GFP_ATOMIC; nvme_tcp_set_queue_io_cpu(queue); queue->request = NULL; diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 9a5873591a40c92184220dc1c61e55c8b40cb5ef..314f306140a1cc13a46b1ac302351111cddc108f 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -902,6 +902,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) return -EINVAL; } + mutex_lock(&opp_table->lock); + opp_table->parsed_static_opps = 1; + mutex_unlock(&opp_table->lock); + val = prop->value; while (nr) { unsigned long freq = be32_to_cpup(val++) * 1000; diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index e386d4eac4070dc241ac3a10cc183687d2ce2126..9a64cf90c291b13d1d9cc4c548c131a11885df52 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -546,9 +546,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, x86_vector_domain); - 
irq_domain_free_fwnode(fn); - if (!vmd->irq_domain) + if (!vmd->irq_domain) { + irq_domain_free_fwnode(fn); return -ENODEV; + } pci_add_resource(&resources, &vmd->resources[0]); pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ce096272f52b19e5829adbaed656a112d0c63167..c9338f914a0ed137286318e6ada088d709c393ff 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4638,8 +4638,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) * pcie_wait_for_link_delay - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? - * @delay: Delay to wait after link has become active (in ms). Specify %0 - * for no delay. + * @delay: Delay to wait after link has become active (in ms) * * Use this to wait till link becomes active or inactive. */ @@ -4680,7 +4679,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, msleep(10); timeout -= 10; } - if (active && ret && delay) + if (active && ret) msleep(delay); else if (ret != active) pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n", @@ -4801,28 +4800,17 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev) if (!pcie_downstream_port(dev)) return; - /* - * Per PCIe r5.0, sec 6.6.1, for downstream ports that support - * speeds > 5 GT/s, we must wait for link training to complete - * before the mandatory delay. - * - * We can only tell when link training completes via DLL Link - * Active, which is required for downstream ports that support - * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common - * devices do not implement Link Active reporting even when it's - * required, so we'll check for that directly instead of checking - * the supported link speed. We assume devices without Link Active - * reporting can train in 100 ms regardless of speed. - */ - if (dev->link_active_reporting) { - pci_dbg(dev, "waiting for link to train\n"); - if (!pcie_wait_for_link_delay(dev, true, 0)) { + if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); + msleep(delay); + } else { + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", + delay); + if (!pcie_wait_for_link_delay(dev, true, delay)) { /* Did not train, no need to wait any further */ return; } } - pci_dbg(child, "waiting %d ms to become accessible\n", delay); - msleep(delay); if (!pci_device_is_present(child)) { pci_dbg(child, "waiting additional %d ms to become accessible\n", delay); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 812bfc32ecb82bcc149a51e338bfc853a16c52f8..2ea61abd58302c84b14f5d018b12232dd3defd9d 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) +{ + pci_info(dev, "Disabling ASPM L0s/L1\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); +} + +/* + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the + * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; + * disable both L0s and L1 for now to be safe. 
+ */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); + /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 1b8e337a29cacd4fd33458e88987853f03121ef5..87c4be9dd41251cc20287e9f336ad99c1be192ec 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1718,6 +1718,7 @@ static struct platform_driver cci_pmu_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = arm_cci_pmu_matches, + .suppress_bind_attrs = true, }, .probe = cci_pmu_probe, .remove = cci_pmu_remove, diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index d50edef91f59be0fdaebc6d2cc12303ceef482fb..7b7d23f2571390c3715ac778ef555873cd8a2eab 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = { .driver = { .name = "arm-ccn", .of_match_table = arm_ccn_match, + .suppress_bind_attrs = true, }, .probe = arm_ccn_probe, .remove = arm_ccn_remove, diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 518d0603e24f35652b601b98680f0ac5d2440be1..96ed93cc78e65a5ee49285a3168bc777980a4612 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -757,6 +757,7 @@ static struct platform_driver dsu_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(dsu_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = dsu_pmu_device_probe, .remove = dsu_pmu_device_remove, diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 48e28ef93a7022f21258016d2c4a1cf109dc1539..4cdb35d166acc350207e8ee5fb6113afee62944b 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, smmu_pmu); smmu_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .pmu_enable = smmu_pmu_enable, .pmu_disable = smmu_pmu_disable, @@ -859,6 +860,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev) static struct platform_driver smmu_pmu_driver = { .driver = { .name = "arm-smmu-v3-pmcg", + .suppress_bind_attrs = true, }, .probe = smmu_pmu_probe, .remove = smmu_pmu_remove, diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index d80f48798bceda66d18f5f44b448be0f0c79d682..e51ddb6d63eda31acbd1a78c6060ff2e223ec44e 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1226,6 +1226,7 @@ static struct platform_driver arm_spe_pmu_driver = { .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(arm_spe_pmu_of_match), + .suppress_bind_attrs = true, }, .probe = arm_spe_pmu_device_probe, .remove = arm_spe_pmu_device_remove, diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 90884d14f95faea01082bfd82c587ecedbd31667..397540a4b799c83d0030a35be2c93c6d77953a3f 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -512,6 +512,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, { *pmu = (struct ddr_pmu) { .pmu = (struct pmu) { + .module = THIS_MODULE, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, .task_ctx_nr = perf_invalid_context, .attr_groups = attr_groups, @@ -706,6 +707,7 @@ static struct platform_driver imx_ddr_pmu_driver = { .driver = { .name = "imx-ddr-pmu", .of_match_table = imx_ddr_pmu_dt_ids, + .suppress_bind_attrs = 
true, }, .probe = ddr_perf_probe, .remove = ddr_perf_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 15713faaa07e77eeed9dbc5333f2bf77c228d924..5e3645c96443f6a864bfae50139dd6208a04c851 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -378,6 +378,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) ddrc_pmu->sccl_id, ddrc_pmu->index_id); ddrc_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -418,6 +419,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = { .driver = { .name = "hisi_ddrc_pmu", .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_ddrc_pmu_probe, .remove = hisi_ddrc_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index dcc5600788a9a271a2adaec47fa84703ec78fbfc..5eb8168029c037fdc37bd4f5e1a38e6a1396ccac 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -390,6 +390,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) hha_pmu->sccl_id, hha_pmu->index_id); hha_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = { .driver = { .name = "hisi_hha_pmu", .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_hha_pmu_probe, .remove = hisi_hha_pmu_remove, diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 7719ae4e2c561468a08c1c0a50d4e2fba411bac0..3e8b5eab55149a63794a5b09eafc6b8a7e40527c 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -380,6 +380,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) l3c_pmu->sccl_id, l3c_pmu->index_id); l3c_pmu->pmu = (struct pmu) { .name = name, + .module = THIS_MODULE, .task_ctx_nr = perf_invalid_context, .event_init = hisi_uncore_pmu_event_init, .pmu_enable = hisi_uncore_pmu_enable, @@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = { .driver = { .name = "hisi_l3c_pmu", .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = hisi_l3c_pmu_probe, .remove = hisi_l3c_pmu_remove, diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 21d6991dbe0ba8efa054499c9695a916752336b1..4da37f650f983fe7d02cb33537475d9478d34843 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = { .driver = { .name = "qcom-l2cache-pmu", .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = l2_cache_pmu_probe, .remove = l2_cache_pmu_remove, diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index 656e830798d9e79ce9bcd050407b0df117a31ff3..9ddb577c542b5e68a94898f68eb30161966bf99f 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = { .driver = { .name = "qcom-l3cache-pmu", .acpi_match_table = 
ACPI_PTR(qcom_l3_cache_pmu_acpi_match), + .suppress_bind_attrs = true, }, .probe = qcom_l3_cache_pmu_probe, }; diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index 51b31d6ff2c4cf6c65bcdc58c83d821ecd4250ad..aac9823b0c6bbdb7050354a594709d48c26cf07d 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -1017,6 +1017,7 @@ static struct platform_driver tx2_uncore_driver = { .driver = { .name = "tx2-uncore-pmu", .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match), + .suppress_bind_attrs = true, }, .probe = tx2_uncore_probe, .remove = tx2_uncore_remove, diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 46ee6807d533af6691e475ba9a3b6cc783c3337e..edac28cd25ddc30734ea5bf54a0474970abaeff9 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1975,6 +1975,7 @@ static struct platform_driver xgene_pmu_driver = { .name = "xgene-pmu", .of_match_table = xgene_pmu_of_match, .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), + .suppress_bind_attrs = true, }, }; diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 85692738224873e0670ec8e6f51485d9d4203bc1..e5842e48a5e07b941f2b5ecc744ba820fec8875f 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -545,13 +545,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work) struct sun4i_usb_phy_data *data = container_of(work, struct sun4i_usb_phy_data, detect.work); struct phy *phy0 = data->phys[0].phy; - struct sun4i_usb_phy *phy = phy_get_drvdata(phy0); + struct sun4i_usb_phy *phy; bool force_session_end, id_notify = false, vbus_notify = false; int id_det, vbus_det; - if (phy0 == NULL) + if (!phy0) return; + phy = phy_get_drvdata(phy0); id_det = sun4i_usb_phy0_get_id_det(data); vbus_det = sun4i_usb_phy0_get_vbus_det(data); diff --git a/drivers/phy/intel/phy-intel-combo.c b/drivers/phy/intel/phy-intel-combo.c index c2a35be4cdfbd6c097d54d41bc38587bd653abd4..360b1eb2ebd694a47608ef307df054290a3e685b 100644 --- a/drivers/phy/intel/phy-intel-combo.c +++ b/drivers/phy/intel/phy-intel-combo.c @@ -134,7 +134,7 @@ static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg, reg_val = readl(base + reg); reg_val &= ~mask; - reg_val |= FIELD_PREP(mask, val); + reg_val |= val; writel(reg_val, base + reg); } @@ -169,7 +169,7 @@ static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy) return 0; combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, - PCIE_PHY_CLK_PAD, 0); + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0)); /* Delay for stable clock PLL */ usleep_range(50, 100); @@ -192,14 +192,14 @@ static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy) return 0; combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL, - PCIE_PHY_CLK_PAD, 1); + PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1)); return 0; } static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy) { - enum intel_combo_mode cb_mode = PHY_PCIE_MODE; + enum intel_combo_mode cb_mode; enum aggregated_mode aggr = cbphy->aggr_mode; struct device *dev = cbphy->dev; enum intel_phy_mode mode; @@ -224,6 +224,8 @@ static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy) cb_mode = SATA0_SATA1_MODE; break; + default: + return -EINVAL; } ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode); @@ -385,7 +387,7 @@ static int intel_cbphy_calibrate(struct phy *phy) /* trigger auto RX adaptation */ combo_phy_w32_off_mask(cr_base, 
CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), - ADAPT_REQ_MSK, 3); + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3)); /* Wait RX adaptation to finish */ ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id), val, val & RX_ADAPT_ACK_BIT, 10, 5000); @@ -396,7 +398,7 @@ static int intel_cbphy_calibrate(struct phy *phy) /* Stop RX adaptation */ combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id), - ADAPT_REQ_MSK, 0); + ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0)); return ret; } diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index a7c6c940a3a8c407432a4f1926417138c67141f4..8af8c6c5cc028b2799015e636ecdaed6a5db0289 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -607,8 +607,8 @@ static int inno_dsidphy_probe(struct platform_device *pdev) platform_set_drvdata(pdev, inno); inno->phy_base = devm_platform_ioremap_resource(pdev, 0); - if (!inno->phy_base) - return -ENOMEM; + if (IS_ERR(inno->phy_base)) + return PTR_ERR(inno->phy_base); inno->ref_clk = devm_clk_get(dev, "ref"); if (IS_ERR(inno->ref_clk)) { diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index 0a166d5a64146ee8ded05ec5063c24c01da44802..a174b3c3f010fad0199256ce9a841eb9b0c8fdbc 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -72,7 +72,7 @@ struct serdes_am654_clk_mux { #define to_serdes_am654_clk_mux(_hw) \ container_of(_hw, struct serdes_am654_clk_mux, hw) -static struct regmap_config serdes_am654_regmap_config = { +static const struct regmap_config serdes_am654_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 30ea5b20728587f9e3fcf8937f2334c7dbd03893..33c4cf0105a49f8cfe944f2fe5182e29f0c9e588 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -117,7 +117,7 @@ struct wiz_clk_mux { struct wiz_clk_divider { struct clk_hw hw; struct regmap_field *field; - struct clk_div_table *table; + const struct clk_div_table *table; struct clk_init_data clk_data; }; @@ -131,7 +131,7 @@ struct wiz_clk_mux_sel { struct wiz_clk_div_sel { struct regmap_field *field; - struct clk_div_table *table; + const struct clk_div_table *table; const char *node_name; }; @@ -173,7 +173,7 @@ static struct wiz_clk_mux_sel clk_mux_sel_10g[] = { }, }; -static struct clk_div_table clk_div_table[] = { +static const struct clk_div_table clk_div_table[] = { { .val = 0, .div = 1, }, { .val = 1, .div = 2, }, { .val = 2, .div = 4, }, @@ -559,7 +559,7 @@ static const struct clk_ops wiz_clk_div_ops = { static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node, struct regmap_field *field, - struct clk_div_table *table) + const struct clk_div_table *table) { struct device *dev = wiz->dev; struct wiz_clk_divider *div; @@ -756,7 +756,7 @@ static const struct reset_control_ops wiz_phy_reset_ops = { .deassert = wiz_phy_reset_deassert, }; -static struct regmap_config wiz_regmap_config = { +static const struct regmap_config wiz_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 0ff7c55173da0e0a3dd4b16bab38ed68063d3153..615174a9d1e06a6ffbd01f1486619b98446c4262 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -800,6 +800,21 @@ static void 
byt_gpio_disable_free(struct pinctrl_dev *pctl_dev, pm_runtime_put(vg->dev); } +static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg, + unsigned int offset) +{ + void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); + + /* + * Before making any direction modifications, do a check if gpio is set + * for direct IRQ. On Bay Trail, setting GPIO to output does not make + * sense, so let's at least inform the caller before they shoot + * themselves in the foot. + */ + if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) + dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); +} + static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, struct pinctrl_gpio_range *range, unsigned int offset, @@ -807,7 +822,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, { struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev); void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); - void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); unsigned long flags; u32 value; @@ -817,14 +831,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev, value &= ~BYT_DIR_MASK; if (input) value |= BYT_OUTPUT_EN; - else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN) - /* - * Before making any direction modifications, do a check if gpio - * is set for direct IRQ. On baytrail, setting GPIO to output - * does not make sense, so let's at least inform the caller before - * they shoot themselves in the foot. - */ - dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output"); + else + byt_gpio_direct_irq_check(vg, offset); writel(value, val_reg); @@ -1165,19 +1173,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { - return pinctrl_gpio_direction_input(chip->base + offset); + struct intel_pinctrl *vg = gpiochip_get_data(chip); + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + unsigned long flags; + u32 reg; + + raw_spin_lock_irqsave(&byt_lock, flags); + + reg = readl(val_reg); + reg &= ~BYT_DIR_MASK; + reg |= BYT_OUTPUT_EN; + writel(reg, val_reg); + + raw_spin_unlock_irqrestore(&byt_lock, flags); + return 0; } +/* + * Note despite the temptation this MUST NOT be converted into a call to + * pinctrl_gpio_direction_output() + byt_gpio_set() that does not work this + * MUST be done as a single BYT_VAL_REG register write. + * See the commit message of the commit adding this comment for details. 
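A rough standalone sketch of the point the Bay Trail comment above is making, assuming an invented bit layout rather than the real BYT_* definitions: the direction change and the output level are folded into one read-modify-write, so the pad never drives a stale level between two separate register writes.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only; the bit positions are assumptions for the
 * example and are not taken from the Bay Trail datasheet.
 */
#define EX_LEVEL      (1u << 1)   /* stand-in for BYT_LEVEL */
#define EX_DIR_MASK   (3u << 2)   /* stand-in for BYT_DIR_MASK */

static uint32_t fake_val_reg = EX_DIR_MASK;   /* pad starts as input */

/* Switch to output and set the level with one combined update. */
static void set_output(int value)
{
	uint32_t reg = fake_val_reg;          /* readl(val_reg) */

	reg &= ~EX_DIR_MASK;                  /* enable output drive */
	if (value)
		reg |= EX_LEVEL;
	else
		reg &= ~EX_LEVEL;

	fake_val_reg = reg;                   /* single writel(reg, val_reg) */
}

int main(void)
{
	set_output(1);
	printf("val reg after one write: 0x%08x\n", (unsigned int)fake_val_reg);
	return 0;
}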
+ */ static int byt_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { - int ret = pinctrl_gpio_direction_output(chip->base + offset); + struct intel_pinctrl *vg = gpiochip_get_data(chip); + void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); + unsigned long flags; + u32 reg; - if (ret) - return ret; + raw_spin_lock_irqsave(&byt_lock, flags); + + byt_gpio_direct_irq_check(vg, offset); - byt_gpio_set(chip, offset, value); + reg = readl(val_reg); + reg &= ~BYT_DIR_MASK; + if (value) + reg |= BYT_LEVEL; + else + reg &= ~BYT_LEVEL; + writel(reg, val_reg); + + raw_spin_unlock_irqrestore(&byt_lock, flags); return 0; } diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h index 3e5760f1a715355dfe44eca5d31b69d973ce566d..d4a192df5fabdc5f04370ae41a6eb00e5ecf5590 100644 --- a/drivers/pinctrl/pinctrl-amd.h +++ b/drivers/pinctrl/pinctrl-amd.h @@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = { { .name = "uart0", .pins = uart0_pins, - .npins = 9, + .npins = 5, }, { .name = "uart1", diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index ff1ee159dca2fd72204e36fc3830d98f70df17af..f8ff30cdafa652e762822d350a6733da63ec09d8 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -7,6 +7,8 @@ config PINCTRL_MSM select PINCONF select GENERIC_PINCONF select GPIOLIB_IRQCHIP + select IRQ_DOMAIN_HIERARCHY + select IRQ_FASTEOI_HIERARCHY_HANDLERS config PINCTRL_APQ8064 tristate "Qualcomm APQ8064 pin controller driver" diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 83b7d64bc4c145cc0157a2f45443b13509106d78..c322f30a206481f021c3fbebe9fc50b80bf88869 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -832,6 +832,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d) msm_gpio_irq_clear_unmask(d, false); } +/** + * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent. + * @d: The irq data. + * + * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are + * normally handled by the parent irqchip. The logic here is slightly + * different due to what's easy to do with our parent, but in principle it's + * the same. + */ +static void msm_gpio_update_dual_edge_parent(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq]; + int loop_limit = 100; + unsigned int val; + unsigned int type; + + /* Read the value and make a guess about what edge we need to catch */ + val = msm_readl_io(pctrl, g) & BIT(g->in_bit); + type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING; + + do { + /* Set the parent to catch the next edge */ + irq_chip_set_type_parent(d, type); + + /* + * Possibly the line changed between when we last read "val" + * (and decided what edge we needed) and when we set the edge. + * If the value didn't change (or changed and then changed + * back) then we're done.
+ */ + val = msm_readl_io(pctrl, g) & BIT(g->in_bit); + if (type == IRQ_TYPE_EDGE_RISING) { + if (!val) + return; + type = IRQ_TYPE_EDGE_FALLING; + } else if (type == IRQ_TYPE_EDGE_FALLING) { + if (val) + return; + type = IRQ_TYPE_EDGE_RISING; + } + } while (loop_limit-- > 0); + dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n"); +} + static void msm_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -840,8 +886,11 @@ static void msm_gpio_irq_ack(struct irq_data *d) unsigned long flags; u32 val; - if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) + if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) { + if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) + msm_gpio_update_dual_edge_parent(d); return; + } g = &pctrl->soc->groups[d->hwirq]; @@ -860,6 +909,17 @@ static void msm_gpio_irq_ack(struct irq_data *d) raw_spin_unlock_irqrestore(&pctrl->lock, flags); } +static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d, + unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + + return type == IRQ_TYPE_EDGE_BOTH && + pctrl->soc->wakeirq_dual_edge_errata && d->parent_data && + test_bit(d->hwirq, pctrl->skip_wake_irqs); +} + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -868,11 +928,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) unsigned long flags; u32 val; + if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) { + set_bit(d->hwirq, pctrl->dual_edge_irqs); + irq_set_handler_locked(d, handle_fasteoi_ack_irq); + msm_gpio_update_dual_edge_parent(d); + return 0; + } + if (d->parent_data) irq_chip_set_type_parent(d, type); - if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) + if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) { + clear_bit(d->hwirq, pctrl->dual_edge_irqs); + irq_set_handler_locked(d, handle_fasteoi_irq); return 0; + } g = &pctrl->soc->groups[d->hwirq]; diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 9452da18a78bd9b743ed896e798ee0dfc67518a6..7486fe08eb9b64cb358f2b5ca34aa2a5d5fa2f0b 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map { * @pull_no_keeper: The SoC does not support keeper bias. * @wakeirq_map: The map of wakeup capable GPIOs and the pin at PDC/MPM * @nwakeirq_map: The number of entries in @wakeirq_map + * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need + * to be aware that their parent can't handle dual + * edge interrupts. 
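The helper documented above emulates IRQ_TYPE_EDGE_BOTH on a parent irqchip that can only latch one edge at a time: sample the line level, arm the opposite edge, then re-read the level to make sure the line did not flip while the edge was being programmed. A standalone sketch of that loop, where read_line_level() and arm_edge() are hypothetical stand-ins for the msm_readl_io() and irq_chip_set_type_parent() calls in the driver:

#include <stdbool.h>
#include <stdio.h>

enum edge { EDGE_RISING, EDGE_FALLING };

/* Simulated GPIO line; stands in for reading the pin's input register. */
static bool line_level;

static bool read_line_level(void)
{
	return line_level;
}

/* Stands in for programming the single-edge parent irqchip. */
static void arm_edge(enum edge type)
{
	printf("armed %s edge\n", type == EDGE_RISING ? "rising" : "falling");
}

/* Arm the edge opposite to the observed level, retrying if the line moves
 * underneath us, so the next transition is never missed. */
static void prime_next_edge(void)
{
	int loop_limit = 100;
	bool level = read_line_level();
	enum edge type = level ? EDGE_FALLING : EDGE_RISING;

	do {
		arm_edge(type);

		/* Re-check: if the level still matches what we armed
		 * against, we are done; otherwise flip and try again. */
		level = read_line_level();
		if (type == EDGE_RISING && !level)
			return;
		if (type == EDGE_FALLING && level)
			return;
		type = (type == EDGE_RISING) ? EDGE_FALLING : EDGE_RISING;
	} while (loop_limit-- > 0);
	/* The driver warns once here instead of giving up silently. */
}

int main(void)
{
	line_level = false;	/* line idle low: a rising edge comes next */
	prime_next_edge();
	return 0;
}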
*/ struct msm_pinctrl_soc_data { const struct pinctrl_pin_desc *pins; @@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data { const int *reserved_gpios; const struct msm_gpio_wakeirq_map *wakeirq_map; unsigned int nwakeirq_map; + bool wakeirq_dual_edge_errata; }; extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops; diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c index 1b6465a882f216bc26959ef3d4209842b8047d50..1d9acad3c1ce2a895f63d9207a8b53c62bbd20c8 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7180.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c @@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = { .ntiles = ARRAY_SIZE(sc7180_tiles), .wakeirq_map = sc7180_pdc_map, .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map), + .wakeirq_dual_edge_errata = true, }; static int sc7180_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 877aade194979dc6c22078cb710783bd1deda95e..8f4acdc06b1347f3d651d84b5977473994a754e5 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -441,6 +441,7 @@ static int asus_wmi_battery_add(struct power_supply *battery) * battery is named BATT. */ if (strcmp(battery->desc->name, "BAT0") != 0 && + strcmp(battery->desc->name, "BAT1") != 0 && strcmp(battery->desc->name, "BATT") != 0) return -ENODEV; diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h index 1409a5bb55820100a7362b1ff87fbbe5357eed62..4f6f7f0761fc1c940b3e165c08abd7257d3555db 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h @@ -13,6 +13,9 @@ #define INTEL_RAPL_PRIO_DEVID_0 0x3451 #define INTEL_CFG_MBOX_DEVID_0 0x3459 +#define INTEL_RAPL_PRIO_DEVID_1 0x3251 +#define INTEL_CFG_MBOX_DEVID_1 0x3259 + /* * Validate maximum commands in a single request. 
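The asus-wmi hunk above adds "BAT1" to the battery names the extension hooks will accept. Purely as an illustration (this is not how the driver is written), the same check in table-driven form, which keeps any future addition down to one line:

#include <stdio.h>
#include <string.h>

/* Accepted battery names, mirroring the strcmp() chain in the hunk above. */
static const char * const accepted_names[] = { "BAT0", "BAT1", "BATT" };

static int battery_name_ok(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(accepted_names) / sizeof(accepted_names[0]); i++)
		if (!strcmp(name, accepted_names[i]))
			return 1;
	return 0;
}

int main(void)
{
	printf("BAT1: %s\n", battery_name_ok("BAT1") ? "accepted" : "rejected");
	printf("BAT2: %s\n", battery_name_ok("BAT2") ? "accepted" : "rejected");
	return 0;
}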
* This is enough to handle command to every core in one ioctl, or all diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c index d84e2174cbdebe71d9bd48b547ac4cf9c846e3de..95f01e7a87d573a5bcd1f2cc12550d5d0e972f6d 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c @@ -147,6 +147,7 @@ static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_mbox_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c index 3584859fcc421aae634ead7c71705951c1edb6bb..aa17fd7817f8fdd4e075a15c7486d1d1bafbfc2f 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c @@ -72,6 +72,7 @@ static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume) static const struct pci_device_id isst_if_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)}, { 0 }, }; MODULE_DEVICE_TABLE(pci, isst_if_ids); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index ff7f0a4f247563f4ebf3c9cce86da52eee1e2a37..0f6fceda5fc0b676be614b5f635111cfa59dbaa4 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -885,11 +885,19 @@ static ssize_t dispatch_proc_write(struct file *file, if (!ibm || !ibm->write) return -EINVAL; + if (count > PAGE_SIZE - 1) + return -EINVAL; + + kernbuf = kmalloc(count + 1, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; - kernbuf = strndup_user(userbuf, PAGE_SIZE); - if (IS_ERR(kernbuf)) - return PTR_ERR(kernbuf); + if (copy_from_user(kernbuf, userbuf, count)) { + kfree(kernbuf); + return -EFAULT; + } + kernbuf[count] = 0; ret = ibm->write(kernbuf); if (ret == 0) ret = count; diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index e8f1633710711c8b39cd443a95152bcf5e469edc..0796e4a47afa5f3ff14af344f9ff731d069f89e4 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -31,7 +31,7 @@ obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o -obj-$(CONFIG_REGULATOR_DA903X) += da903x.o +obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o obj-$(CONFIG_REGULATOR_DA9062) += da9062-regulator.o diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x-regulator.c similarity index 100% rename from drivers/regulator/da903x.c rename to drivers/regulator/da903x-regulator.c diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 53a64d856926f7e4bf4aecd2dd495fa37fd8df8a..7f5c318c82596f8d44b3f52d1b97f59dde204592 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -821,7 +821,7 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = { static const struct rpm_regulator_data 
rpm_pmi8994_regulators[] = { { "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" }, { "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" }, - { "s2", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" }, + { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" }, { "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" }, {} }; diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index e5a64d4f255cacc3d1795e76af47a4d8c4dfe48a..49c8a1818baf89e58c7543f6157cb307cfcf5bd3 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2629,7 +2629,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, "iscsi_q_%d", shost->host_no); ihost->workq = alloc_workqueue("%s", WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 2, ihost->workq_name); + 1, ihost->workq_name); if (!ihost->workq) goto free_host; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 319f241da4b66eb30a485a0301553f3e48bbb1bc..fcf03f733e4172eff8ea33b72164a8e2cbba51ff 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3739,10 +3739,8 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp) if (instance->mask_interrupts) return IRQ_NONE; -#if defined(ENABLE_IRQ_POLL) if (irq_context->irq_poll_scheduled) return IRQ_HANDLED; -#endif if (!instance->msix_vectors) { mfiStatus = instance->instancet->clear_intr(instance); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 62e552838565fa32fd36d7e8226218845a2e516f..983e568ff2317ec52d4a02530afd5d0b308daf49 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -3145,19 +3145,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, if (!ioc->is_warpdrive) { ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n", __func__); - goto out; + return 0; } /* pci_access_mutex lock acquired by sysfs show path */ mutex_lock(&ioc->pci_access_mutex); - if (ioc->pci_error_recovery || ioc->remove_host) { - mutex_unlock(&ioc->pci_access_mutex); - return 0; - } + if (ioc->pci_error_recovery || ioc->remove_host) + goto out; /* allocate upto GPIOVal 36 entries */ sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); io_unit_pg3 = kzalloc(sz, GFP_KERNEL); if (!io_unit_pg3) { + rc = -ENOMEM; ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n", __func__, sz); goto out; @@ -3167,6 +3166,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, 0) { ioc_err(ioc, "%s: failed reading iounit_pg3\n", __func__); + rc = -EINVAL; goto out; } @@ -3174,12 +3174,14 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n", __func__, ioc_status); + rc = -EINVAL; goto out; } if (io_unit_pg3->GPIOCount < 25) { ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n", __func__, io_unit_pg3->GPIOCount); + rc = -EINVAL; goto out; } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index eed31021e7885c28f92bbaa97a6fda71b53527cf..ba84244c1b4f65e7a34b9f9b78252985996d8839 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -239,6 +239,7 @@ static struct { {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + 
{"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES | BLIST_INQUIRY_36}, {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 42f0550d6b11ff81b2306fc135dff8f716516131..6f41e4b5a2b85fcc392f3172c0031d71af7883b6 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"LSI", "INF-01-00", "rdac", }, {"ENGENIO", "INF-01-00", "rdac", }, {"LENOVO", "DE_Series", "rdac", }, + {"FUJITSU", "ETERNUS_AHB", "rdac", }, {NULL, NULL, NULL }, }; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0ba7a65e7c8d96619ec0f285e0376776c281d374..06056e9ec3335bdead7ea49cbd749b24941e2e72 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -547,6 +547,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) scsi_uninit_cmd(cmd); } +static void scsi_run_queue_async(struct scsi_device *sdev) +{ + if (scsi_target(sdev)->single_lun || + !list_empty(&sdev->host->starved_list)) + kblockd_schedule_work(&sdev->requeue_work); + else + blk_mq_run_hw_queues(sdev->request_queue, true); +} + /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, unsigned int bytes) @@ -591,11 +600,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, __blk_mq_end_request(req, error); - if (scsi_target(sdev)->single_lun || - !list_empty(&sdev->host->starved_list)) - kblockd_schedule_work(&sdev->requeue_work); - else - blk_mq_run_hw_queues(q, true); + scsi_run_queue_async(sdev); percpu_ref_put(&q->q_usage_counter); return false; @@ -1702,6 +1707,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, */ if (req->rq_flags & RQF_DONTPREP) scsi_mq_uninit_cmd(cmd); + scsi_run_queue_async(sdev); break; } return ret; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f4cc08eb47ba886e52e8fe6b0a38bf514ef3adc9..7ae5024e78243b412dd37b3aee8d86fb1621c001 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -4760,7 +4760,7 @@ static __init int iscsi_transport_init(void) iscsi_eh_timer_workq = alloc_workqueue("%s", WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 2, "iscsi_eh"); + 1, "iscsi_eh"); if (!iscsi_eh_timer_workq) { err = -ENOMEM; goto release_nls; diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index f8661062ef9547a322c4b6f2ed2fa7d0eb8b2d7a..f3d5b1bbd5aa7eff36c5d91cd9d2df6e41a77f8d 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -339,7 +339,7 @@ store_spi_transport_##field(struct device *dev, \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ - if (i->f->set_##field) \ + if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c index 01fc0d20a70dbc821744c406ed8f8df0467345e5..6f54bd832c8b8023f0038f07ef95ac5159390a83 100644 --- a/drivers/soc/amlogic/meson-gx-socinfo.c +++ b/drivers/soc/amlogic/meson-gx-socinfo.c @@ -66,10 +66,12 @@ static const struct meson_gx_package_id { { "A113D", 0x25, 0x22, 0xff }, { "S905D2", 0x28, 0x10, 0xf0 }, { "S905X2", 0x28, 0x40, 0xf0 }, - { "S922X", 0x29, 0x40, 0xf0 }, { "A311D", 0x29, 0x10, 0xf0 }, - 
{ "S905X3", 0x2b, 0x5, 0xf }, - { "S905D3", 0x2b, 0xb0, 0xf0 }, + { "S922X", 0x29, 0x40, 0xf0 }, + { "S905D3", 0x2b, 0x4, 0xf5 }, + { "S905X3", 0x2b, 0x5, 0xf5 }, + { "S905X3", 0x2b, 0x10, 0x3f }, + { "S905D3", 0x2b, 0x30, 0x3f }, { "A113L", 0x2c, 0x0, 0xf8 }, }; diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c index fec3d672b606aaf397f47c699531c4f1dc78cbaf..01bfea1cb64a86c2ccaa8871263754c1f4b2f4a3 100644 --- a/drivers/soc/imx/soc-imx.c +++ b/drivers/soc/imx/soc-imx.c @@ -33,6 +33,9 @@ static int __init imx_soc_device_init(void) u32 val; int ret; + if (of_machine_is_compatible("fsl,ls1021a")) + return 0; + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 4cfdd074e310d9688c81eb3fece7985508471776..c7422740edd4ccf3e4ccd424dfbd0dff0c04951e 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -930,8 +930,9 @@ static int intel_create_dai(struct sdw_cdns *cdns, /* TODO: Read supported rates/formats from hardware */ for (i = off; i < (off + num); i++) { - dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", - cdns->instance, i); + dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL, + "SDW%d Pin%d", + cdns->instance, i); if (!dais[i].name) return -ENOMEM; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 6783e12c40c22ecbd9b808c96122702146bd28a1..a556795caeef47ce50b5ca687361258f761b5705 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -36,7 +36,6 @@ #define SPI_CFG0_SCK_LOW_OFFSET 8 #define SPI_CFG0_CS_HOLD_OFFSET 16 #define SPI_CFG0_CS_SETUP_OFFSET 24 -#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16 #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0 #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16 @@ -48,6 +47,8 @@ #define SPI_CFG1_CS_IDLE_MASK 0xff #define SPI_CFG1_PACKET_LOOP_MASK 0xff00 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000 +#define SPI_CFG2_SCK_HIGH_OFFSET 0 +#define SPI_CFG2_SCK_LOW_OFFSET 16 #define SPI_CMD_ACT BIT(0) #define SPI_CMD_RESUME BIT(1) @@ -283,7 +284,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable) static void mtk_spi_prepare_transfer(struct spi_master *master, struct spi_transfer *xfer) { - u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0; + u32 spi_clk_hz, div, sck_time, cs_time, reg_val; struct mtk_spi *mdata = spi_master_get_devdata(master); spi_clk_hz = clk_get_rate(mdata->spi_clk); @@ -296,18 +297,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master, cs_time = sck_time * 2; if (mdata->dev_comp->enhance_timing) { + reg_val = (((sck_time - 1) & 0xffff) + << SPI_CFG2_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xffff) - << SPI_CFG0_SCK_HIGH_OFFSET); - reg_val |= (((sck_time - 1) & 0xffff) - << SPI_ADJUST_CFG0_SCK_LOW_OFFSET); + << SPI_CFG2_SCK_LOW_OFFSET); writel(reg_val, mdata->base + SPI_CFG2_REG); - reg_val |= (((cs_time - 1) & 0xffff) + reg_val = (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_HOLD_OFFSET); reg_val |= (((cs_time - 1) & 0xffff) << SPI_ADJUST_CFG0_CS_SETUP_OFFSET); writel(reg_val, mdata->base + SPI_CFG0_REG); } else { - reg_val |= (((sck_time - 1) & 0xff) + reg_val = (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET); reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index ecea15534c42e1b66fa4798a85c8d2e33e085136..fa11cc0e809b756e602d9c87d251cdff9a94b153 100644 --- a/drivers/spi/spi-sun6i.c 
+++ b/drivers/spi/spi-sun6i.c @@ -198,7 +198,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, struct spi_transfer *tfr) { struct sun6i_spi *sspi = spi_master_get_devdata(master); - unsigned int mclk_rate, div, timeout; + unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout; unsigned int start, end, tx_time; unsigned int trig_level; unsigned int tx_len = 0; @@ -287,14 +287,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master, * First try CDR2, and if we can't reach the expected * frequency, fall back to CDR1. */ - div = mclk_rate / (2 * tfr->speed_hz); - if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { - if (div > 0) - div--; - - reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS; + div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz); + div_cdr2 = DIV_ROUND_UP(div_cdr1, 2); + if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) { + reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS; } else { - div = ilog2(mclk_rate) - ilog2(tfr->speed_hz); + div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1)); reg = SUN6I_CLK_CTL_CDR1(div); } diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c index 560649be9d1309479d959e20c57e68b34f39abb2..e035c9f757a1ca317c7e71f61081c0bcd763fef7 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1032.c +++ b/drivers/staging/comedi/drivers/addi_apci_1032.c @@ -106,14 +106,22 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1032_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -136,8 +144,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1032_CTRL_INT_ENA | @@ -154,8 +162,8 @@ static int apci1032_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c index 45ad4ba92f94f2cbba62aa844a6bf1229de7ae5b..816dd25b9d0e451d5312b762fd636be74e6ce067 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1500.c +++ b/drivers/staging/comedi/drivers/addi_apci_1500.c @@ -452,13 +452,14 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, struct apci1500_private *devpriv = dev->private; unsigned int trig = data[1]; unsigned int shift = data[3]; - unsigned int hi_mask = data[4] << shift; - unsigned int lo_mask = data[5] << shift; - unsigned int chan_mask = hi_mask | lo_mask; - unsigned int old_mask = (1 << shift) - 1; - unsigned int pm = devpriv->pm[trig] & old_mask; - unsigned int pt = devpriv->pt[trig] & old_mask; - unsigned int pp = devpriv->pp[trig] & old_mask; + unsigned int hi_mask; + unsigned 
int lo_mask; + unsigned int chan_mask; + unsigned int old_mask; + unsigned int pm; + unsigned int pt; + unsigned int pp; + unsigned int invalid_chan; if (trig > 1) { dev_dbg(dev->class_dev, @@ -466,11 +467,28 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev, return -EINVAL; } - if (chan_mask > 0xffff) { + if (shift <= 16) { + hi_mask = data[4] << shift; + lo_mask = data[5] << shift; + old_mask = (1U << shift) - 1; + invalid_chan = (data[4] | data[5]) >> (16 - shift); + } else { + hi_mask = 0; + lo_mask = 0; + old_mask = 0xffff; + invalid_chan = data[4] | data[5]; + } + chan_mask = hi_mask | lo_mask; + + if (invalid_chan) { dev_dbg(dev->class_dev, "invalid digital trigger channel\n"); return -EINVAL; } + pm = devpriv->pm[trig] & old_mask; + pt = devpriv->pt[trig] & old_mask; + pp = devpriv->pp[trig] & old_mask; + switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: /* clear trigger configuration */ diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c index 10501fe6bb25363fc88cd1d1b194ee511e45e2c1..1268ba34be5f0981d849236ee2d7659abb383731 100644 --- a/drivers/staging/comedi/drivers/addi_apci_1564.c +++ b/drivers/staging/comedi/drivers/addi_apci_1564.c @@ -331,14 +331,22 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, unsigned int *data) { struct apci1564_private *devpriv = dev->private; - unsigned int shift, oldmask; + unsigned int shift, oldmask, himask, lomask; switch (data[0]) { case INSN_CONFIG_DIGITAL_TRIG: if (data[1] != 0) return -EINVAL; shift = data[3]; - oldmask = (1U << shift) - 1; + if (shift < 32) { + oldmask = (1U << shift) - 1; + himask = data[4] << shift; + lomask = data[5] << shift; + } else { + oldmask = 0xffffffffu; + himask = 0; + lomask = 0; + } switch (data[2]) { case COMEDI_DIGITAL_TRIG_DISABLE: devpriv->ctrl = 0; @@ -362,8 +370,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA | @@ -380,8 +388,8 @@ static int apci1564_cos_insn_config(struct comedi_device *dev, devpriv->mode2 &= oldmask; } /* configure specified channels */ - devpriv->mode1 |= data[4] << shift; - devpriv->mode2 |= data[5] << shift; + devpriv->mode1 |= himask; + devpriv->mode2 |= lomask; break; default: return -EINVAL; diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c index 4d1eccb5041dc45a04f9179e5a47e8e5364e4eae..4518c2680b7c5e986b660ed575acc02fef5f17e7 100644 --- a/drivers/staging/comedi/drivers/ni_6527.c +++ b/drivers/staging/comedi/drivers/ni_6527.c @@ -332,7 +332,7 @@ static int ni6527_intr_insn_config(struct comedi_device *dev, case COMEDI_DIGITAL_TRIG_ENABLE_EDGES: /* check shift amount */ shift = data[3]; - if (shift >= s->n_chan) { + if (shift >= 32) { mask = 0; rising = 0; falling = 0; diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig index fea06cb0eb48f4194f67019123579545a2cf749d..37577bb7299809bda15201e6367198a9877f1570 100644 --- a/drivers/staging/media/atomisp/Kconfig +++ b/drivers/staging/media/atomisp/Kconfig @@ -22,7 +22,7 @@ config VIDEO_ATOMISP module will be called atomisp config VIDEO_ATOMISP_ISP2401 - bool "VIDEO_ATOMISP_ISP2401" + bool "Use Intel Atom ISP on Cherrytail/Anniedale (ISP2401)" depends on 
VIDEO_ATOMISP help Enable support for Atom ISP2401-based boards. diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile index 9dc8072799e3f405949c56aa3f695a05c6879831..205d0f8cc2e120f93d56bc5368e9d3a883312ee6 100644 --- a/drivers/staging/media/atomisp/Makefile +++ b/drivers/staging/media/atomisp/Makefile @@ -156,6 +156,7 @@ atomisp-objs += \ pci/hive_isp_css_common/host/timed_ctrl.o \ pci/hive_isp_css_common/host/vmem.o \ pci/hive_isp_css_shared/host/tag.o \ + pci/system_local.o \ obj-byt = \ pci/css_2400_system/hive/ia_css_isp_configs.o \ @@ -182,7 +183,6 @@ INCLUDES += \ -I$(atomisp)/include/hmm/ \ -I$(atomisp)/include/mmu/ \ -I$(atomisp)/pci/ \ - -I$(atomisp)/pci/hrt/ \ -I$(atomisp)/pci/base/circbuf/interface/ \ -I$(atomisp)/pci/base/refcount/interface/ \ -I$(atomisp)/pci/camera/pipe/interface/ \ @@ -192,7 +192,6 @@ INCLUDES += \ -I$(atomisp)/pci/hive_isp_css_include/ \ -I$(atomisp)/pci/hive_isp_css_include/device_access/ \ -I$(atomisp)/pci/hive_isp_css_include/host/ \ - -I$(atomisp)/pci/hive_isp_css_include/memory_access/ \ -I$(atomisp)/pci/hive_isp_css_shared/ \ -I$(atomisp)/pci/hive_isp_css_shared/host/ \ -I$(atomisp)/pci/isp/kernels/ \ @@ -311,9 +310,7 @@ INCLUDES += \ -I$(atomisp)/pci/runtime/tagger/interface/ INCLUDES_byt += \ - -I$(atomisp)/pci/css_2400_system/ \ -I$(atomisp)/pci/css_2400_system/hive/ \ - -I$(atomisp)/pci/css_2400_system/hrt/ \ INCLUDES_cht += \ -I$(atomisp)/pci/css_2401_system/ \ @@ -321,7 +318,6 @@ INCLUDES_cht += \ -I$(atomisp)/pci/css_2401_system/hive/ \ -I$(atomisp)/pci/css_2401_system/hrt/ \ -# -I$(atomisp)/pci/css_2401_system/hrt/ \ # -I$(atomisp)/pci/css_2401_system/hive_isp_css_2401_system_generated/ \ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__ diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c index 90d125ba080f68296dec92a77ddc34d4c2dbf56f..c90730513438bd8af6f6da87040871ac772d92f7 100644 --- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c +++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c @@ -495,11 +495,11 @@ static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value) ret = ov2680_read_reg(client, 1, OV2680_MIRROR_REG, &val); if (ret) return ret; - if (value) { + if (value) val |= OV2680_FLIP_MIRROR_BIT_ENABLE; - } else { + else val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE; - } + ret = ov2680_write_reg(client, 1, OV2680_MIRROR_REG, val); if (ret) diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c index 97ab10bc45ca654acb1165ed8319cfdea7e55881..e698b63d6cb7a2d47dfc2fc3ba379e1adcd1c8f8 100644 --- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c +++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c @@ -1899,7 +1899,7 @@ static int ov5693_probe(struct i2c_client *client) { struct ov5693_device *dev; int i2c; - int ret = 0; + int ret; void *pdata; unsigned int i; @@ -1929,8 +1929,10 @@ static int ov5693_probe(struct i2c_client *client) pdata = gmin_camera_platform_data(&dev->sd, ATOMISP_INPUT_FORMAT_RAW_10, atomisp_bayer_order_bggr); - if (!pdata) + if (!pdata) { + ret = -EINVAL; goto out_free; + } ret = ov5693_s_config(&dev->sd, client->irq, pdata); if (ret) diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h index 873344a02ccf670ef26d528fcc4a8234e255bc78..5a5121d958ede5c435a5abe5188fee81150f76ee 100644 --- 
a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h @@ -250,6 +250,7 @@ const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void); #define IS_MFLD __IS_SOC(INTEL_FAM6_ATOM_SALTWELL_MID) #define IS_BYT __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT) #define IS_CHT __IS_SOC(INTEL_FAM6_ATOM_AIRMONT) +#define IS_MRFD __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT_MID) #define IS_MOFD __IS_SOC(INTEL_FAM6_ATOM_AIRMONT_MID) /* Both CHT and MOFD come with ISP2401 */ diff --git a/drivers/staging/media/atomisp/pci/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp-regs.h index de34ee28e3900fc0c030906bbad33f09bfd5b1a4..022997f47121b4d24e5c93899a2a505106d7c8d1 100644 --- a/drivers/staging/media/atomisp/pci/atomisp-regs.h +++ b/drivers/staging/media/atomisp/pci/atomisp-regs.h @@ -20,9 +20,6 @@ #define ATOMISP_REGS_H /* common register definitions */ -#define PUNIT_PORT 0x04 -#define CCK_PORT 0x14 - #define PCICMDSTS 0x01 #define INTR 0x0f #define MSI_CAPID 0x24 diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c index 76861396ba864d211098bd6e2c7223bc5cb050ff..f638d0bd09fe6f4d3c956e17f79c050ac56de31d 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_acc.c +++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c @@ -355,11 +355,11 @@ int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map) pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE); if (pgnr < ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) { - dev_err(atomisp_dev, + dev_err(asd->isp->dev, "user space memory size is less than the expected size..\n"); return -ENOMEM; } else if (pgnr > ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) { - dev_err(atomisp_dev, + dev_err(asd->isp->dev, "user space memory size is large than the expected size..\n"); return -ENOMEM; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index 7b936e5a5f037ac370e6921f72d9cda80c7f2b74..8ea65bef35d24f4db4caa79c246e46f6df9d59ab 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -109,7 +110,7 @@ struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev) static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd) { - struct v4l2_subdev_frame_interval fi; + struct v4l2_subdev_frame_interval fi = { 0 }; struct atomisp_device *isp = asd->isp; unsigned short fps = 0; @@ -206,6 +207,7 @@ int atomisp_freq_scaling(struct atomisp_device *isp, enum atomisp_dfs_mode mode, bool force) { + struct pci_dev *pdev = to_pci_dev(isp->dev); /* FIXME! 
Only use subdev[0] status yet */ struct atomisp_sub_device *asd = &isp->asd[0]; const struct atomisp_dfs_config *dfs; @@ -219,7 +221,7 @@ int atomisp_freq_scaling(struct atomisp_device *isp, return -EINVAL; } - if ((isp->pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) == + if ((pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) == ATOMISP_PCI_DEVICE_SOC_CHT && ATOMISP_USE_YUVPP(asd)) isp->dfs = &dfs_config_cht_soc; @@ -357,39 +359,41 @@ static void clear_isp_irq(enum hrt_isp_css_irq irq) irq_clear_all(IRQ0_ID); } -void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev) +void atomisp_msi_irq_init(struct atomisp_device *isp) { + struct pci_dev *pdev = to_pci_dev(isp->dev); u32 msg32; u16 msg16; - pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32); + pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32); msg32 |= 1 << MSI_ENABLE_BIT; - pci_write_config_dword(dev, PCI_MSI_CAPID, msg32); + pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32); msg32 = (1 << INTR_IER) | (1 << INTR_IIR); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32); - pci_read_config_word(dev, PCI_COMMAND, &msg16); + pci_read_config_word(pdev, PCI_COMMAND, &msg16); msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INTX_DISABLE); - pci_write_config_word(dev, PCI_COMMAND, msg16); + pci_write_config_word(pdev, PCI_COMMAND, msg16); } -void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev) +void atomisp_msi_irq_uninit(struct atomisp_device *isp) { + struct pci_dev *pdev = to_pci_dev(isp->dev); u32 msg32; u16 msg16; - pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32); + pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32); msg32 &= ~(1 << MSI_ENABLE_BIT); - pci_write_config_dword(dev, PCI_MSI_CAPID, msg32); + pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32); msg32 = 0x0; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32); - pci_read_config_word(dev, PCI_COMMAND, &msg16); + pci_read_config_word(pdev, PCI_COMMAND, &msg16); msg16 &= ~(PCI_COMMAND_MASTER); - pci_write_config_word(dev, PCI_COMMAND, msg16); + pci_write_config_word(pdev, PCI_COMMAND, msg16); } static void atomisp_sof_event(struct atomisp_sub_device *asd) @@ -480,11 +484,12 @@ static void print_csi_rx_errors(enum mipi_port_id port, /* Clear irq reg */ static void clear_irq_reg(struct atomisp_device *isp) { + struct pci_dev *pdev = to_pci_dev(isp->dev); u32 msg_ret; - pci_read_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, &msg_ret); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &msg_ret); msg_ret |= 1 << INTR_IIR; - pci_write_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, msg_ret); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg_ret); } static struct atomisp_sub_device * @@ -665,11 +670,10 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe) void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr, unsigned int size) { - u32 __iomem *io_virt_addr; unsigned int data = 0; unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32)); - dev_dbg(isp->dev, "atomisp_io_base:%p\n", atomisp_io_base); + dev_dbg(isp->dev, "atomisp mmio base: %p\n", isp->base); dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__, addr, size, size32); if (size32 * 4 + addr > 0x4000) { @@ -678,13 +682,12 @@ void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr, return; } addr += SP_DMEM_BASE; - io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + addr &= 0x003FFFFF; do { - data = 
*io_virt_addr; + data = readl(isp->base + addr); dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data); - io_virt_addr += sizeof(u32); - size32 -= 1; - } while (size32 > 0); + addr += sizeof(u32); + } while (--size32); } static struct videobuf_buffer *atomisp_css_frame_to_vbuf( @@ -1289,6 +1292,7 @@ void atomisp_delayed_init_work(struct work_struct *work) static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) { + struct pci_dev *pdev = to_pci_dev(isp->dev); enum ia_css_pipe_id css_pipe_id; bool stream_restart[MAX_STREAM_NUM] = {0}; bool depth_mode = false; @@ -1372,8 +1376,8 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) clear_isp_irq(hrt_isp_css_irq_sp); /* Set the SRSE to 3 before resetting */ - pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control | - MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); + pci_write_config_dword(pdev, PCI_I_CONTROL, + isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); /* reset ISP and restore its state */ isp->isp_timeout = true; @@ -6158,6 +6162,7 @@ int atomisp_set_shading_table(struct atomisp_sub_device *asd, /*Turn off ISP dphy */ int atomisp_ospm_dphy_down(struct atomisp_device *isp) { + struct pci_dev *pdev = to_pci_dev(isp->dev); unsigned long flags; u32 reg; @@ -6179,9 +6184,9 @@ int atomisp_ospm_dphy_down(struct atomisp_device *isp) * MRFLD HW design need all CSI ports are disabled before * powering down the IUNIT. */ - pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, ®); + pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, ®); reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK; - pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, reg); + pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, reg); return 0; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h index 0bde995f1a8d377d17edd536b4d0ce72bd93b1dc..1c0d464c2ac1d88fca345f5b05efcc0545d2f3e2 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h @@ -68,8 +68,8 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe); /* * Interrupt functions */ -void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev); -void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev); +void atomisp_msi_irq_init(struct atomisp_device *isp); +void atomisp_msi_irq_uninit(struct atomisp_device *isp); void atomisp_wdt_work(struct work_struct *work); void atomisp_wdt(struct timer_list *t); void atomisp_setup_flash(struct atomisp_sub_device *asd); diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h index b2ed83c2f337a1cfb20434dacd4b7038213a8b45..6a2a81a3eb2337b45c197c2247a076ee0c8b3eb1 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat.h +++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h @@ -29,8 +29,6 @@ struct atomisp_sub_device; struct video_device; enum atomisp_input_stream_id; -extern void __iomem *atomisp_io_base; - struct atomisp_metadata_buf { struct ia_css_metadata *metadata; void *md_vptr; diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c index c1e282a974d03fba568346677afd6c17a6b49cec..cccc5bfa105732f117992341e471ed44870e7436 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c @@ -33,13 +33,12 @@ #include 
"atomisp_ioctl.h" #include "atomisp_acc.h" -#include - #include "ia_css_debug.h" #include "ia_css_isp_param.h" #include "sh_css_hrt.h" #include "ia_css_isys.h" +#include #include /* Assume max number of ACC stages */ @@ -69,92 +68,94 @@ struct bayer_ds_factor { static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data) { - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; spin_lock_irqsave(&mmio_lock, flags); - *io_virt_addr = data; + writeb(data, isp->base + (addr & 0x003FFFFF)); spin_unlock_irqrestore(&mmio_lock, flags); } static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data) { - s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; spin_lock_irqsave(&mmio_lock, flags); - *io_virt_addr = data; + writew(data, isp->base + (addr & 0x003FFFFF)); spin_unlock_irqrestore(&mmio_lock, flags); } void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data) { - s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; spin_lock_irqsave(&mmio_lock, flags); - *io_virt_addr = data; + writel(data, isp->base + (addr & 0x003FFFFF)); spin_unlock_irqrestore(&mmio_lock, flags); } static uint8_t atomisp_css2_hw_load_8(hrt_address addr) { - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; u8 ret; spin_lock_irqsave(&mmio_lock, flags); - ret = *io_virt_addr; + ret = readb(isp->base + (addr & 0x003FFFFF)); spin_unlock_irqrestore(&mmio_lock, flags); return ret; } static uint16_t atomisp_css2_hw_load_16(hrt_address addr) { - s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; u16 ret; spin_lock_irqsave(&mmio_lock, flags); - ret = *io_virt_addr; + ret = readw(isp->base + (addr & 0x003FFFFF)); spin_unlock_irqrestore(&mmio_lock, flags); return ret; } static uint32_t atomisp_css2_hw_load_32(hrt_address addr) { - s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; u32 ret; spin_lock_irqsave(&mmio_lock, flags); - ret = *io_virt_addr; + ret = readl(isp->base + (addr & 0x003FFFFF)); spin_unlock_irqrestore(&mmio_lock, flags); return ret; } -static void atomisp_css2_hw_store(hrt_address addr, - const void *from, uint32_t n) +static void atomisp_css2_hw_store(hrt_address addr, const void *from, uint32_t n) { - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; unsigned int i; + addr &= 0x003FFFFF; spin_lock_irqsave(&mmio_lock, flags); - for (i = 0; i < n; i++, io_virt_addr++, from++) - *io_virt_addr = *(s8 *)from; + for (i = 0; i < n; i++, from++) + writeb(*(s8 *)from, isp->base + addr + i); + spin_unlock_irqrestore(&mmio_lock, flags); } static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n) { - s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF); + struct atomisp_device *isp = dev_get_drvdata(atomisp_dev); unsigned long flags; unsigned int i; + addr &= 0x003FFFFF; spin_lock_irqsave(&mmio_lock, flags); - for (i = 0; i < n; i++, to++, io_virt_addr++) - *(s8 *)to = *io_virt_addr; + for (i = 0; i < n; i++, 
to++) + *(s8 *)to = readb(isp->base + addr + i); spin_unlock_irqrestore(&mmio_lock, flags); } @@ -181,10 +182,10 @@ void atomisp_load_uint32(hrt_address addr, uint32_t *data) *data = atomisp_css2_hw_load_32(addr); } -static int hmm_get_mmu_base_addr(unsigned int *mmu_base_addr) +static int hmm_get_mmu_base_addr(struct device *dev, unsigned int *mmu_base_addr) { if (!sh_mmu_mrfld.get_pd_base) { - dev_err(atomisp_dev, "get mmu base address failed.\n"); + dev_err(dev, "get mmu base address failed.\n"); return -EINVAL; } @@ -839,7 +840,7 @@ int atomisp_css_init(struct atomisp_device *isp) int ret; int err; - ret = hmm_get_mmu_base_addr(&mmu_base_addr); + ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr); if (ret) return ret; @@ -941,7 +942,7 @@ int atomisp_css_resume(struct atomisp_device *isp) unsigned int mmu_base_addr; int ret; - ret = hmm_get_mmu_base_addr(&mmu_base_addr); + ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr); if (ret) { dev_err(isp->dev, "get base address error.\n"); return -EINVAL; @@ -1966,8 +1967,7 @@ void atomisp_css_input_set_mode(struct atomisp_sub_device *asd, true, 0x13000, &size_mem_words) != 0) { - if (intel_mid_identify_cpu() == - INTEL_MID_CPU_CHIP_TANGIER) + if (IS_MRFD) size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2; else size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1; @@ -2414,13 +2414,13 @@ static void __configure_preview_pp_input(struct atomisp_sub_device *asd, struct ia_css_resolution *effective_res = &stream_config->input_config.effective_res; - const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} }; + static const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} }; /* * BZ201033: YUV decimation factor of 4 causes couple of rightmost * columns to be shaded. Remove this factor to work around the CSS bug. 
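The atomisp_compat_css20.c hunks above replace direct dereferences of an __iomem pointer (atomisp_io_base plus an offset) with readb()/readw()/readl() and the matching write accessors on isp->base. A generic kernel-context sketch of that pattern (not a standalone program; the names are illustrative and the offset mask simply mirrors the driver's windowing of the address):

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_OFFSET_MASK	0x003FFFFF

/* "base" is assumed to come from ioremap() or
 * devm_platform_ioremap_resource(); never dereference it directly. */
static u32 example_mmio_read32(void __iomem *base, unsigned long addr)
{
	return readl(base + (addr & EXAMPLE_OFFSET_MASK));
}

static void example_mmio_write32(void __iomem *base, unsigned long addr, u32 val)
{
	writel(val, base + (addr & EXAMPLE_OFFSET_MASK));
}

/* Byte-wise copy out of the window, as the reworked hw_load() above does. */
static void example_mmio_read_bytes(void __iomem *base, unsigned long addr,
				    void *to, unsigned int n)
{
	unsigned int i;

	addr &= EXAMPLE_OFFSET_MASK;
	for (i = 0; i < n; i++)
		((u8 *)to)[i] = readb(base + addr + i);
}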
* const unsigned int yuv_dec_fct[] = {4, 2}; */ - const unsigned int yuv_dec_fct[] = { 2 }; + static const unsigned int yuv_dec_fct[] = { 2 }; unsigned int i; if (width == 0 && height == 0) @@ -2540,7 +2540,7 @@ static void __configure_video_pp_input(struct atomisp_sub_device *asd, struct ia_css_resolution *effective_res = &stream_config->input_config.effective_res; - const struct bayer_ds_factor bds_factors[] = { + static const struct bayer_ds_factor bds_factors[] = { {8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2} }; unsigned int i; @@ -4337,7 +4337,7 @@ static const char * const fw_acc_type_name[] = { [IA_CSS_ACC_STANDALONE] = "Stand-alone acceleration", }; -int atomisp_css_dump_blob_infor(void) +int atomisp_css_dump_blob_infor(struct atomisp_device *isp) { struct ia_css_blob_descr *bd = sh_css_blob_info; unsigned int i, nm = sh_css_num_binaries; @@ -4354,8 +4354,7 @@ int atomisp_css_dump_blob_infor(void) for (i = 0; i < sh_css_num_binaries - NUM_OF_SPS; i++) { switch (bd[i].header.type) { case ia_css_isp_firmware: - dev_dbg(atomisp_dev, - "Num%2d type %s (%s), binary id is %2d, name is %s\n", + dev_dbg(isp->dev, "Num%2d type %s (%s), binary id is %2d, name is %s\n", i + NUM_OF_SPS, fw_type_name[bd[i].header.type], fw_acc_type_name[bd[i].header.info.isp.type], @@ -4363,8 +4362,7 @@ int atomisp_css_dump_blob_infor(void) bd[i].name); break; default: - dev_dbg(atomisp_dev, - "Num%2d type %s, name is %s\n", + dev_dbg(isp->dev, "Num%2d type %s, name is %s\n", i + NUM_OF_SPS, fw_type_name[bd[i].header.type], bd[i].name); } diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h index 8376aec18e3e3f0e6d382a14b69be5bc551811eb..e0601534380f4c6e54bae743c418ddc59aada4d5 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h @@ -153,7 +153,7 @@ int atomisp_css_debug_dump_isp_binary(void); int atomisp_css_dump_sp_raw_copy_linecount(bool reduced); -int atomisp_css_dump_blob_infor(void); +int atomisp_css_dump_blob_infor(struct atomisp_device *isp); void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd, uint32_t isp_config_id); diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c index fe0e2bfde27f19022cb133d82dec33c9b0aac386..f670faf978e6e73484cece970308fc627a479c86 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c +++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c @@ -62,9 +62,9 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp, if (opt & OPTION_VALID) { if (opt & OPTION_BIN_LIST) { - ret = atomisp_css_dump_blob_infor(); + ret = atomisp_css_dump_blob_infor(isp); if (ret) { - dev_err(atomisp_dev, "%s dump blob infor err[ret:%d]\n", + dev_err(isp->dev, "%s dump blob infor err[ret:%d]\n", __func__, ret); goto opt_err; } @@ -76,7 +76,7 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp, atomisp_css_debug_dump_isp_binary(); } else { ret = -EPERM; - dev_err(atomisp_dev, "%s dump running bin err[ret:%d]\n", + dev_err(isp->dev, "%s dump running bin err[ret:%d]\n", __func__, ret); goto opt_err; } @@ -86,8 +86,7 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp, hmm_show_mem_stat(__func__, __LINE__); } else { ret = -EINVAL; - dev_err(atomisp_dev, "%s dump nothing[ret=%d]\n", __func__, - ret); + dev_err(isp->dev, "%s dump nothing[ret=%d]\n", __func__, ret); } opt_err: @@ -185,8 +184,9 @@ static void 
iunit_drvfs_remove_files(struct device_driver *drv) driver_remove_file(drv, &iunit_drvfs_attrs[i]); } -int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp) +int atomisp_drvfs_init(struct atomisp_device *isp) { + struct device_driver *drv = isp->dev->driver; int ret; iunit_debug.isp = isp; @@ -194,7 +194,7 @@ int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp) ret = iunit_drvfs_create_files(iunit_debug.drv); if (ret) { - dev_err(atomisp_dev, "drvfs_create_files error: %d\n", ret); + dev_err(isp->dev, "drvfs_create_files error: %d\n", ret); iunit_drvfs_remove_files(iunit_debug.drv); } diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h index 4911037231fb1658a9f1de52401959a3a1c06588..8f4cc722b881e8162697f9af07d985d8f6ac2842 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.h +++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h @@ -19,7 +19,7 @@ #ifndef __ATOMISP_DRVFS_H__ #define __ATOMISP_DRVFS_H__ -int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp); +int atomisp_drvfs_init(struct atomisp_device *isp); void atomisp_drvfs_exit(void); #endif /* __ATOMISP_DRVFS_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index 1af9da8acf4cf8f9a7b6aa00a47b49464e03d506..0df46a1af5f0a2a9f3895f86d44f49b410e292fe 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -26,6 +26,9 @@ enum clock_rate { #define CLK_RATE_19_2MHZ 19200000 #define CLK_RATE_25_0MHZ 25000000 +/* Valid clock number range from 0 to 5 */ +#define MAX_CLK_COUNT 5 + /* X-Powers AXP288 register set */ #define ALDO1_SEL_REG 0x28 #define ALDO1_CTRL3_REG 0x13 @@ -61,9 +64,7 @@ enum clock_rate { struct gmin_subdev { struct v4l2_subdev *subdev; - int clock_num; enum clock_rate clock_src; - bool clock_on; struct clk *pmc_clk; struct gpio_desc *gpio0; struct gpio_desc *gpio1; @@ -75,11 +76,16 @@ struct gmin_subdev { unsigned int csi_lanes; enum atomisp_input_format csi_fmt; enum atomisp_bayer_order csi_bayer; + + bool clock_on; bool v1p8_on; bool v2p8_on; bool v1p2_on; bool v2p8_vcm_on; + int v1p8_gpio; + int v2p8_gpio; + u8 pwm_i2c_addr; /* For PMIC AXP */ @@ -90,9 +96,9 @@ struct gmin_subdev { static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS]; /* ACPI HIDs for the PMICs that could be used by this driver */ -#define PMIC_ACPI_AXP "INT33F4:00" /* XPower AXP288 PMIC */ -#define PMIC_ACPI_TI "INT33F5:00" /* Dollar Cove TI PMIC */ -#define PMIC_ACPI_CRYSTALCOVE "INT33FD:00" /* Crystal Cove PMIC */ +#define PMIC_ACPI_AXP "INT33F4" /* XPower AXP288 PMIC */ +#define PMIC_ACPI_TI "INT33F5" /* Dollar Cove TI PMIC */ +#define PMIC_ACPI_CRYSTALCOVE "INT33FD" /* Crystal Cove PMIC */ #define PMIC_PLATFORM_TI "intel_soc_pmic_chtdc_ti" @@ -105,7 +111,7 @@ static enum { } pmic_id; static const char *pmic_name[] = { - [PMIC_UNSET] = "unset", + [PMIC_UNSET] = "ACPI device PM", [PMIC_REGULATOR] = "regulator driver", [PMIC_AXP] = "XPower AXP288 PMIC", [PMIC_TI] = "Dollar Cove TI PMIC", @@ -119,24 +125,6 @@ static const struct atomisp_platform_data pdata = { .subdevs = pdata_subdevs, }; -/* - * Something of a hack. The ECS E7 board drives camera 2.8v from an - * external regulator instead of the PMIC. 
There's a gmin_CamV2P8 - * config variable that specifies the GPIO to handle this particular - * case, but this needs a broader architecture for handling camera - * power. - */ -enum { V2P8_GPIO_UNSET = -2, V2P8_GPIO_NONE = -1 }; -static int v2p8_gpio = V2P8_GPIO_UNSET; - -/* - * Something of a hack. The CHT RVP board drives camera 1.8v from an - * external regulator instead of the PMIC just like ECS E7 board, see the - * comments above. - */ -enum { V1P8_GPIO_UNSET = -2, V1P8_GPIO_NONE = -1 }; -static int v1p8_gpio = V1P8_GPIO_UNSET; - static LIST_HEAD(vcm_devices); static DEFINE_MUTEX(vcm_lock); @@ -199,6 +187,8 @@ int atomisp_register_i2c_module(struct v4l2_subdev *subdev, * gmin_subdev struct is already initialized for us. */ gs = find_gmin_subdev(subdev); + if (!gs) + return -ENODEV; pdata.subdevs[i].type = type; pdata.subdevs[i].port = gs->csi_port; @@ -294,6 +284,7 @@ static struct gmin_cfg_var mrd7_vars[] = { {"INT33F8:00_CsiFmt", "13"}, {"INT33F8:00_CsiBayer", "0"}, {"INT33F8:00_CamClk", "0"}, + {"INT33F9:00_CamType", "1"}, {"INT33F9:00_CsiPort", "0"}, {"INT33F9:00_CsiLanes", "1"}, @@ -309,6 +300,7 @@ static struct gmin_cfg_var ecs7_vars[] = { {"INT33BE:00_CsiFmt", "13"}, {"INT33BE:00_CsiBayer", "2"}, {"INT33BE:00_CamClk", "0"}, + {"INT33F0:00_CsiPort", "0"}, {"INT33F0:00_CsiLanes", "1"}, {"INT33F0:00_CsiFmt", "13"}, @@ -322,6 +314,7 @@ static struct gmin_cfg_var i8880_vars[] = { {"XXOV2680:00_CsiPort", "1"}, {"XXOV2680:00_CsiLanes", "1"}, {"XXOV2680:00_CamClk", "0"}, + {"XXGC0310:00_CsiPort", "0"}, {"XXGC0310:00_CsiLanes", "1"}, {"XXGC0310:00_CamClk", "1"}, @@ -381,34 +374,27 @@ static const guid_t atomisp_dsm_guid = GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d, #define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */ static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME]; -static int gmin_i2c_match_one(struct device *dev, const void *data) -{ - const char *name = data; - struct i2c_client *client; - - if (dev->type != &i2c_client_type) - return 0; - - client = to_i2c_client(dev); - - return (!strcmp(name, client->name)); -} - static struct i2c_client *gmin_i2c_dev_exists(struct device *dev, char *name, struct i2c_client **client) { + struct acpi_device *adev; struct device *d; - while ((d = bus_find_device(&i2c_bus_type, NULL, name, - gmin_i2c_match_one))) { - *client = to_i2c_client(d); - dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n", - (*client)->name, (*client)->addr, - (*client)->adapter->nr); - return *client; - } + adev = acpi_dev_get_first_match_dev(name, NULL, -1); + if (!adev) + return NULL; - return NULL; + d = bus_find_device_by_acpi_dev(&i2c_bus_type, adev); + acpi_dev_put(adev); + if (!d) + return NULL; + + *client = i2c_verify_client(d); + put_device(d); + + dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n", + (*client)->name, (*client)->addr, (*client)->adapter->nr); + return *client; } static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg, @@ -427,94 +413,222 @@ static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg, "I2C write, addr: 0x%02x, reg: 0x%02x, value: 0x%02x, mask: 0x%02x\n", i2c_addr, reg, value, mask); - ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, - value, mask); - - if (ret == -EOPNOTSUPP) { + ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, value, mask); + if (ret == -EOPNOTSUPP) dev_err(dev, "ACPI didn't mapped the OpRegion needed to access I2C address 0x%02x.\n" - "Need to compile the Kernel using CONFIG_*_PMIC_OPREGION settings\n", + "Need to compile the kernel using 
CONFIG_*_PMIC_OPREGION settings\n", i2c_addr); - return ret; - } return ret; } -static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev) +static int atomisp_get_acpi_power(struct device *dev) { - struct i2c_client *power = NULL, *client = v4l2_get_subdevdata(subdev); - struct acpi_device *adev; - acpi_handle handle; - struct device *dev; - int i, ret; + char name[5]; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer b_name = { sizeof(name), name }; + union acpi_object *package, *element; + acpi_handle handle = ACPI_HANDLE(dev); + acpi_handle rhandle; + acpi_status status; + int clock_num = -1; + int i; - if (!client) - return NULL; + status = acpi_evaluate_object(handle, "_PR0", NULL, &buffer); + if (!ACPI_SUCCESS(status)) + return -1; - dev = &client->dev; + package = buffer.pointer; - handle = ACPI_HANDLE(dev); + if (!buffer.length || !package + || package->type != ACPI_TYPE_PACKAGE + || !package->package.count) + goto fail; - // FIXME: may need to release resources allocated by acpi_bus_get_device() - if (!handle || acpi_bus_get_device(handle, &adev)) { - dev_err(dev, "Error could not get ACPI device\n"); - return NULL; - } + for (i = 0; i < package->package.count; i++) { + element = &package->package.elements[i]; - dev_info(&client->dev, "%s: ACPI detected it on bus ID=%s, HID=%s\n", - __func__, acpi_device_bid(adev), acpi_device_hid(adev)); + if (element->type != ACPI_TYPE_LOCAL_REFERENCE) + continue; - if (!pmic_id) { - if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power)) - pmic_id = PMIC_TI; - else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power)) - pmic_id = PMIC_AXP; - else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power)) - pmic_id = PMIC_CRYSTALCOVE; - else - pmic_id = PMIC_REGULATOR; + rhandle = element->reference.handle; + if (!rhandle) + goto fail; + + acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name); + + dev_dbg(dev, "Found PM resource '%s'\n", name); + if (strlen(name) == 4 && !strncmp(name, "CLK", 3)) { + if (name[3] >= '0' && name[3] <= '4') + clock_num = name[3] - '0'; +#if 0 + /* + * We could abort here, but let's parse all resources, + * as this is helpful for debugging purposes + */ + if (clock_num >= 0) + break; +#endif + } } - for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++) - ; - if (i >= MAX_SUBDEVS) - return NULL; +fail: + ACPI_FREE(buffer.pointer); + + return clock_num; +} + +static u8 gmin_get_pmic_id_and_addr(struct device *dev) +{ + struct i2c_client *power; + static u8 pmic_i2c_addr; + + if (pmic_id) + return pmic_i2c_addr; + + if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power)) + pmic_id = PMIC_TI; + else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power)) + pmic_id = PMIC_AXP; + else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power)) + pmic_id = PMIC_CRYSTALCOVE; + else + pmic_id = PMIC_REGULATOR; + + pmic_i2c_addr = power ? 
power->addr : 0; + return pmic_i2c_addr; +} + +static int gmin_detect_pmic(struct v4l2_subdev *subdev) +{ + struct i2c_client *client = v4l2_get_subdevdata(subdev); + struct device *dev = &client->dev; + u8 pmic_i2c_addr; + + pmic_i2c_addr = gmin_get_pmic_id_and_addr(dev); + dev_info(dev, "gmin: power management provided via %s (i2c addr 0x%02x)\n", + pmic_name[pmic_id], pmic_i2c_addr); + return pmic_i2c_addr; +} + +static int gmin_subdev_add(struct gmin_subdev *gs) +{ + struct i2c_client *client = v4l2_get_subdevdata(gs->subdev); + struct device *dev = &client->dev; + struct acpi_device *adev = ACPI_COMPANION(dev); + int ret, clock_num = -1; + + dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev)); + + /*WA:CHT requires XTAL clock as PLL is not stable.*/ + gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc", + VLV2_CLK_PLL_19P2MHZ); + + gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", 0); + gs->csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1); + + gs->gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); + if (IS_ERR(gs->gpio0)) + gs->gpio0 = NULL; + else + dev_info(dev, "will handle gpio0 via ACPI\n"); + + gs->gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW); + if (IS_ERR(gs->gpio1)) + gs->gpio1 = NULL; + else + dev_info(dev, "will handle gpio1 via ACPI\n"); + + /* + * Those are used only when there is an external regulator apart + * from the PMIC that would be providing power supply, as in the + * two cases below: + * + * The ECS E7 board drives camera 2.8v from an external regulator + * instead of the PMIC. There's a gmin_CamV2P8 config variable + * that specifies the GPIO to handle this particular case, + * but this needs a broader architecture for handling camera power. + * + * The CHT RVP board drives camera 1.8v from an external regulator + * instead of the PMIC, just like the ECS E7 board. + */ - if (power) { - gmin_subdevs[i].pwm_i2c_addr = power->addr; + gs->v1p8_gpio = gmin_get_var_int(dev, true, "V1P8GPIO", -1); + gs->v2p8_gpio = gmin_get_var_int(dev, true, "V2P8GPIO", -1); + + /* + * FIXME: + * + * The ACPI handling code checks for the _PR? tables in order to + * know what is required to switch the device from power state + * D0 (_PR0) up to D3COLD (_PR3). + * + * The adev->flags.power_manageable is set to true if the device + * has a _PR0 table, which can be checked by calling + * acpi_device_power_manageable(adev). + * + * However, this only says that the device can be set to power off + * mode. + * + * At least on the DSDT tables we've seen so far, there's no _PR3, + * nor _PS3 (which would have a somewhat similar effect). + * So, using ACPI for power management won't work, except if adding + * an ACPI override logic somewhere. + * + * So, at least for the existing devices we know, the check below + * will always be false.
+ */ + if (acpi_device_can_wakeup(adev) && + acpi_device_can_poweroff(adev)) { dev_info(dev, - "gmin: power management provided via %s (i2c addr 0x%02x)\n", - pmic_name[pmic_id], power->addr); - } else { - dev_info(dev, "gmin: power management provided via %s\n", - pmic_name[pmic_id]); + "gmin: power management provided via device PM\n"); + return 0; } - gmin_subdevs[i].subdev = subdev; - gmin_subdevs[i].clock_num = gmin_get_var_int(dev, false, "CamClk", 0); - /*WA:CHT requires XTAL clock as PLL is not stable.*/ - gmin_subdevs[i].clock_src = gmin_get_var_int(dev, false, "ClkSrc", - VLV2_CLK_PLL_19P2MHZ); - gmin_subdevs[i].csi_port = gmin_get_var_int(dev, false, "CsiPort", 0); - gmin_subdevs[i].csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1); + /* + * The code below is here due to backward compatibility with devices + * whose ACPI BIOS may not contain everything that would be needed + * in order to set clocks and do power management. + */ + + /* + * According to: + * https://github.com/projectceladon/hardware-intel-kernelflinger/blob/master/doc/fastboot.md + * + * The "CamClk" EFI var is set via fastboot on some Android devices, + * and seems to contain the number of the clock used to feed the + * sensor. + * + * On systems with a proper ACPI table, this is given via the _PR0 + * power resource table. The logic below should first check if there + * is a power resource already, falling back to the EFI vars detection + * otherwise. + */ - /* get PMC clock with clock framework */ - snprintf(gmin_pmc_clk_name, - sizeof(gmin_pmc_clk_name), - "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num); + /* Try first to use ACPI to get the clock resource */ + if (acpi_device_power_manageable(adev)) + clock_num = atomisp_get_acpi_power(dev); - gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name); - if (IS_ERR(gmin_subdevs[i].pmc_clk)) { - ret = PTR_ERR(gmin_subdevs[i].pmc_clk); + /* Fall back to EFI and/or DMI match */ + if (clock_num < 0) + clock_num = gmin_get_var_int(dev, false, "CamClk", 0); - dev_err(dev, - "Failed to get clk from %s : %d\n", - gmin_pmc_clk_name, - ret); + if (clock_num < 0 || clock_num > MAX_CLK_COUNT) { + dev_err(dev, "Invalid clock number\n"); + return -EINVAL; + } - return NULL; + snprintf(gmin_pmc_clk_name, sizeof(gmin_pmc_clk_name), + "%s_%d", "pmc_plt_clk", clock_num); + + gs->pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name); + if (IS_ERR(gs->pmc_clk)) { + ret = PTR_ERR(gs->pmc_clk); + dev_err(dev, "Failed to get clk from %s: %d\n", gmin_pmc_clk_name, ret); + return ret; } + dev_info(dev, "Will use CLK%d (%s)\n", clock_num, gmin_pmc_clk_name); /* * The firmware might enable the clock at @@ -526,25 +640,17 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev) * to disable a clock that has not been enabled, * we need to enable the clock first.
*/ - ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk); + ret = clk_prepare_enable(gs->pmc_clk); if (!ret) - clk_disable_unprepare(gmin_subdevs[i].pmc_clk); - - gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW); - if (IS_ERR(gmin_subdevs[i].gpio0)) - gmin_subdevs[i].gpio0 = NULL; - - gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW); - if (IS_ERR(gmin_subdevs[i].gpio1)) - gmin_subdevs[i].gpio1 = NULL; + clk_disable_unprepare(gs->pmc_clk); switch (pmic_id) { case PMIC_REGULATOR: - gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX"); - gmin_subdevs[i].v2p8_reg = regulator_get(dev, "V2P8SX"); + gs->v1p8_reg = regulator_get(dev, "V1P8SX"); + gs->v2p8_reg = regulator_get(dev, "V2P8SX"); - gmin_subdevs[i].v1p2_reg = regulator_get(dev, "V1P2A"); - gmin_subdevs[i].v2p8_vcm_reg = regulator_get(dev, "VPROG4B"); + gs->v1p2_reg = regulator_get(dev, "V1P2A"); + gs->v2p8_vcm_reg = regulator_get(dev, "VPROG4B"); /* Note: ideally we would initialize v[12]p8_on to the * output of regulator_is_enabled(), but sadly that @@ -556,32 +662,31 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev) break; case PMIC_AXP: - gmin_subdevs[i].eldo1_1p8v = gmin_get_var_int(dev, false, - "eldo1_1p8v", - ELDO1_1P8V); - gmin_subdevs[i].eldo1_sel_reg = gmin_get_var_int(dev, false, - "eldo1_sel_reg", - ELDO1_SEL_REG); - gmin_subdevs[i].eldo1_ctrl_shift = gmin_get_var_int(dev, false, - "eldo1_ctrl_shift", - ELDO1_CTRL_SHIFT); - gmin_subdevs[i].eldo2_1p8v = gmin_get_var_int(dev, false, - "eldo2_1p8v", - ELDO2_1P8V); - gmin_subdevs[i].eldo2_sel_reg = gmin_get_var_int(dev, false, - "eldo2_sel_reg", - ELDO2_SEL_REG); - gmin_subdevs[i].eldo2_ctrl_shift = gmin_get_var_int(dev, false, - "eldo2_ctrl_shift", - ELDO2_CTRL_SHIFT); - gmin_subdevs[i].pwm_i2c_addr = power->addr; + gs->eldo1_1p8v = gmin_get_var_int(dev, false, + "eldo1_1p8v", + ELDO1_1P8V); + gs->eldo1_sel_reg = gmin_get_var_int(dev, false, + "eldo1_sel_reg", + ELDO1_SEL_REG); + gs->eldo1_ctrl_shift = gmin_get_var_int(dev, false, + "eldo1_ctrl_shift", + ELDO1_CTRL_SHIFT); + gs->eldo2_1p8v = gmin_get_var_int(dev, false, + "eldo2_1p8v", + ELDO2_1P8V); + gs->eldo2_sel_reg = gmin_get_var_int(dev, false, + "eldo2_sel_reg", + ELDO2_SEL_REG); + gs->eldo2_ctrl_shift = gmin_get_var_int(dev, false, + "eldo2_ctrl_shift", + ELDO2_CTRL_SHIFT); break; default: break; } - return &gmin_subdevs[i]; + return 0; } static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev) @@ -591,7 +696,17 @@ static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev) for (i = 0; i < MAX_SUBDEVS; i++) if (gmin_subdevs[i].subdev == subdev) return &gmin_subdevs[i]; - return gmin_subdev_add(subdev); + return NULL; +} + +static struct gmin_subdev *find_free_gmin_subdev_slot(void) +{ + unsigned int i; + + for (i = 0; i < MAX_SUBDEVS; i++) + if (gmin_subdevs[i].subdev == NULL) + return &gmin_subdevs[i]; + return NULL; } static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs, @@ -700,32 +815,24 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on) { struct gmin_subdev *gs = find_gmin_subdev(subdev); int ret; - struct device *dev; - struct i2c_client *client = v4l2_get_subdevdata(subdev); int value; - dev = &client->dev; - - if (v1p8_gpio == V1P8_GPIO_UNSET) { - v1p8_gpio = gmin_get_var_int(dev, true, - "V1P8GPIO", V1P8_GPIO_NONE); - if (v1p8_gpio != V1P8_GPIO_NONE) { - pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n", - v1p8_gpio); - ret = gpio_request(v1p8_gpio, "camera_v1p8_en"); - if (!ret) - 
ret = gpio_direction_output(v1p8_gpio, 0); - if (ret) - pr_err("V1P8 GPIO initialization failed\n"); - } + if (gs->v1p8_gpio >= 0) { + pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n", + gs->v1p8_gpio); + ret = gpio_request(gs->v1p8_gpio, "camera_v1p8_en"); + if (!ret) + ret = gpio_direction_output(gs->v1p8_gpio, 0); + if (ret) + pr_err("V1P8 GPIO initialization failed\n"); } if (!gs || gs->v1p8_on == on) return 0; gs->v1p8_on = on; - if (v1p8_gpio >= 0) - gpio_set_value(v1p8_gpio, on); + if (gs->v1p8_gpio >= 0) + gpio_set_value(gs->v1p8_gpio, on); if (gs->v1p8_reg) { regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000); @@ -762,32 +869,24 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on) { struct gmin_subdev *gs = find_gmin_subdev(subdev); int ret; - struct device *dev; - struct i2c_client *client = v4l2_get_subdevdata(subdev); int value; - dev = &client->dev; - - if (v2p8_gpio == V2P8_GPIO_UNSET) { - v2p8_gpio = gmin_get_var_int(dev, true, - "V2P8GPIO", V2P8_GPIO_NONE); - if (v2p8_gpio != V2P8_GPIO_NONE) { - pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n", - v2p8_gpio); - ret = gpio_request(v2p8_gpio, "camera_v2p8"); - if (!ret) - ret = gpio_direction_output(v2p8_gpio, 0); - if (ret) - pr_err("V2P8 GPIO initialization failed\n"); - } + if (gs->v2p8_gpio >= 0) { + pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n", + gs->v2p8_gpio); + ret = gpio_request(gs->v2p8_gpio, "camera_v2p8"); + if (!ret) + ret = gpio_direction_output(gs->v2p8_gpio, 0); + if (ret) + pr_err("V2P8 GPIO initialization failed\n"); } if (!gs || gs->v2p8_on == on) return 0; gs->v2p8_on = on; - if (v2p8_gpio >= 0) - gpio_set_value(v2p8_gpio, on); + if (gs->v2p8_gpio >= 0) + gpio_set_value(gs->v2p8_gpio, on); if (gs->v2p8_reg) { regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000); @@ -819,6 +918,37 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on) return -EINVAL; } +static int gmin_acpi_pm_ctrl(struct v4l2_subdev *subdev, int on) +{ + int ret = 0; + struct gmin_subdev *gs = find_gmin_subdev(subdev); + struct i2c_client *client = v4l2_get_subdevdata(subdev); + struct acpi_device *adev = ACPI_COMPANION(&client->dev); + + /* Use the ACPI power management to control it */ + on = !!on; + if (gs->clock_on == on) + return 0; + + dev_dbg(subdev->dev, "Setting power state to %s\n", + on ? "on" : "off"); + + if (on) + ret = acpi_device_set_power(adev, + ACPI_STATE_D0); + else + ret = acpi_device_set_power(adev, + ACPI_STATE_D3_COLD); + + if (!ret) + gs->clock_on = on; + else + dev_err(subdev->dev, "Couldn't set power state to %s\n", + on ? 
"on" : "off"); + + return ret; +} + static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on) { int ret = 0; @@ -884,7 +1014,7 @@ static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev, return NULL; } -static struct camera_sensor_platform_data gmin_plat = { +static struct camera_sensor_platform_data pmic_gmin_plat = { .gpio0_ctrl = gmin_gpio0_ctrl, .gpio1_ctrl = gmin_gpio1_ctrl, .v1p8_ctrl = gmin_v1p8_ctrl, @@ -895,17 +1025,36 @@ static struct camera_sensor_platform_data gmin_plat = { .get_vcm_ctrl = gmin_get_vcm_ctrl, }; +static struct camera_sensor_platform_data acpi_gmin_plat = { + .gpio0_ctrl = gmin_gpio0_ctrl, + .gpio1_ctrl = gmin_gpio1_ctrl, + .v1p8_ctrl = gmin_acpi_pm_ctrl, + .v2p8_ctrl = gmin_acpi_pm_ctrl, + .v1p2_ctrl = gmin_acpi_pm_ctrl, + .flisclk_ctrl = gmin_acpi_pm_ctrl, + .csi_cfg = gmin_csi_cfg, + .get_vcm_ctrl = gmin_get_vcm_ctrl, +}; + struct camera_sensor_platform_data *gmin_camera_platform_data( struct v4l2_subdev *subdev, enum atomisp_input_format csi_format, enum atomisp_bayer_order csi_bayer) { - struct gmin_subdev *gs = find_gmin_subdev(subdev); + u8 pmic_i2c_addr = gmin_detect_pmic(subdev); + struct gmin_subdev *gs; + gs = find_free_gmin_subdev_slot(); + gs->subdev = subdev; gs->csi_fmt = csi_format; gs->csi_bayer = csi_bayer; + gs->pwm_i2c_addr = pmic_i2c_addr; - return &gmin_plat; + gmin_subdev_add(gs); + if (gs->pmc_clk) + return &pmic_gmin_plat; + else + return &acpi_gmin_plat; } EXPORT_SYMBOL_GPL(gmin_camera_platform_data); @@ -957,12 +1106,28 @@ static int gmin_get_config_dsm_var(struct device *dev, union acpi_object *obj, *cur = NULL; int i; + /* + * The data reported by "CamClk" seems to be either 0 or 1 at the + * _DSM table. + * + * At the ACPI tables we looked so far, this is not related to the + * actual clock source for the sensor, which is given by the + * _PR0 ACPI table. So, ignore it, as otherwise this will be + * set to a wrong value. + */ + if (!strcmp(var, "CamClk")) + return -EINVAL; + obj = acpi_evaluate_dsm(handle, &atomisp_dsm_guid, 0, 0, NULL); if (!obj) { dev_info_once(dev, "Didn't find ACPI _DSM table.\n"); return -EINVAL; } + /* Return on unexpected object type */ + if (obj->type != ACPI_TYPE_PACKAGE) + return -EINVAL; + #if 0 /* Just for debugging purposes */ for (i = 0; i < obj->package.count; i++) { union acpi_object *cur = &obj->package.elements[i]; @@ -1155,10 +1320,10 @@ EXPORT_SYMBOL_GPL(camera_sensor_csi); * trying. The driver itself does direct calls to the PUNIT to manage * ISP power. 
*/ -static void isp_pm_cap_fixup(struct pci_dev *dev) +static void isp_pm_cap_fixup(struct pci_dev *pdev) { - dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n"); - dev->pm_cap = 0; + dev_info(&pdev->dev, "Disabling PCI power management on camera ISP\n"); + pdev->pm_cap = 0; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup); diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h index ff3becd411109093a4a63c2b478207cd1a7ee14c..c01db10bb7355df89e3676ff0a9b21375ef25c75 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_internal.h +++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h @@ -216,12 +216,12 @@ struct atomisp_sw_contex { * ci device struct */ struct atomisp_device { - struct pci_dev *pdev; struct device *dev; struct v4l2_device v4l2_dev; struct media_device media_dev; struct atomisp_platform_data *pdata; void *mmu_l1_base; + void __iomem *base; const struct firmware *firmware; struct pm_qos_request pm_qos; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 9404a678fa6fe4849abd9ce06555965926318e45..f8d616f08b510fe093b9fe6e47683c2bb4b63137 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -549,8 +549,7 @@ static int atomisp_querycap(struct file *file, void *fh, strscpy(cap->driver, DRIVER, sizeof(cap->driver)); strscpy(cap->card, CARD, sizeof(cap->card)); - snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", - pci_name(isp->pdev)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", dev_name(isp->dev)); return 0; } @@ -1635,6 +1634,7 @@ static int atomisp_streamon(struct file *file, void *fh, struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); struct atomisp_sub_device *asd = pipe->asd; struct atomisp_device *isp = video_get_drvdata(vdev); + struct pci_dev *pdev = to_pci_dev(isp->dev); enum ia_css_pipe_id css_pipe_id; unsigned int sensor_start_stream; unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION; @@ -1844,9 +1844,8 @@ static int atomisp_streamon(struct file *file, void *fh, /* Enable the CSI interface on ANN B0/K0 */ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) { - pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL, - isp->saved_regs.csi_control | - MRFLD_PCI_CSI_CONTROL_CSI_READY); + pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL, + isp->saved_regs.csi_control | MRFLD_PCI_CSI_CONTROL_CSI_READY); } /* stream on the sensor */ @@ -1891,6 +1890,7 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); + struct pci_dev *pdev = to_pci_dev(isp->dev); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); struct atomisp_sub_device *asd = pipe->asd; struct atomisp_video_pipe *capture_pipe = NULL; @@ -2076,9 +2076,8 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) /* Disable the CSI interface on ANN B0/K0 */ if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) { - pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL, - isp->saved_regs.csi_control & - ~MRFLD_PCI_CSI_CONTROL_CSI_READY); + pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL, + isp->saved_regs.csi_control & 
~MRFLD_PCI_CSI_CONTROL_CSI_READY); } if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false)) @@ -2111,8 +2110,8 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) } /* disable PUNIT/ISP acknowlede/handshake - SRSE=3 */ - pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control | - MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); + pci_write_config_dword(pdev, PCI_I_CONTROL, + isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK); dev_err(isp->dev, "atomisp_reset"); atomisp_reset(isp); for (i = 0; i < isp->num_of_streams; i++) { diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index d36809a0182c058104489c54646ae7c032efb72c..a000a1e316f78d705f320cefd8f855b017ccd655 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -127,8 +127,6 @@ MODULE_PARM_DESC(pad_h, "extra data for ISP processing"); struct device *atomisp_dev; -void __iomem *atomisp_io_base; - static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = { { .width = ISP_FREQ_RULE_ANY, @@ -512,30 +510,27 @@ void atomisp_acc_unregister(struct atomisp_acc_pipe *video) static int atomisp_save_iunit_reg(struct atomisp_device *isp) { - struct pci_dev *dev = isp->pdev; + struct pci_dev *pdev = to_pci_dev(isp->dev); dev_dbg(isp->dev, "%s\n", __func__); - pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts); + pci_read_config_word(pdev, PCI_COMMAND, &isp->saved_regs.pcicmdsts); /* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */ - pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap); - pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr); - pci_read_config_word(dev, PCI_MSI_DATA, &isp->saved_regs.msi_data); - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr); - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, - &isp->saved_regs.interrupt_control); - - pci_read_config_dword(dev, MRFLD_PCI_PMCS, - &isp->saved_regs.pmcs); + pci_read_config_dword(pdev, PCI_MSI_CAPID, &isp->saved_regs.msicap); + pci_read_config_dword(pdev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr); + pci_read_config_word(pdev, PCI_MSI_DATA, &isp->saved_regs.msi_data); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &isp->saved_regs.interrupt_control); + + pci_read_config_dword(pdev, MRFLD_PCI_PMCS, &isp->saved_regs.pmcs); /* Ensure read/write combining is enabled. */ - pci_read_config_dword(dev, PCI_I_CONTROL, - &isp->saved_regs.i_control); + pci_read_config_dword(pdev, PCI_I_CONTROL, &isp->saved_regs.i_control); isp->saved_regs.i_control |= MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING | MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING; - pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, + pci_read_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, &isp->saved_regs.csi_access_viol); - pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL, + pci_read_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL, &isp->saved_regs.csi_rcomp_config); /* * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE. @@ -545,65 +540,58 @@ static int atomisp_save_iunit_reg(struct atomisp_device *isp) * is missed, and IUNIT can hang. * For both issues, setting this bit is a workaround. 
*/ - isp->saved_regs.csi_rcomp_config |= - MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE; - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, + isp->saved_regs.csi_rcomp_config |= MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE; + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, &isp->saved_regs.csi_afe_dly); - pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL, + pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &isp->saved_regs.csi_control); if (isp->media_dev.hw_revision >= (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT)) - isp->saved_regs.csi_control |= - MRFLD_PCI_CSI_CONTROL_PARPATHEN; + isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_PARPATHEN; /* * On CHT CSI_READY bit should be enabled before stream on */ if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0))) - isp->saved_regs.csi_control |= - MRFLD_PCI_CSI_CONTROL_CSI_READY; - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, + isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_CSI_READY; + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, &isp->saved_regs.csi_afe_rcomp_config); - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL, + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL, &isp->saved_regs.csi_afe_hs_control); - pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL, + pci_read_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL, &isp->saved_regs.csi_deadline_control); return 0; } static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp) { - struct pci_dev *dev = isp->pdev; + struct pci_dev *pdev = to_pci_dev(isp->dev); dev_dbg(isp->dev, "%s\n", __func__); - pci_write_config_word(dev, PCI_COMMAND, isp->saved_regs.pcicmdsts); - pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, - isp->saved_regs.ispmmadr); - pci_write_config_dword(dev, PCI_MSI_CAPID, isp->saved_regs.msicap); - pci_write_config_dword(dev, PCI_MSI_ADDR, isp->saved_regs.msi_addr); - pci_write_config_word(dev, PCI_MSI_DATA, isp->saved_regs.msi_data); - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, isp->saved_regs.intr); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, - isp->saved_regs.interrupt_control); - pci_write_config_dword(dev, PCI_I_CONTROL, - isp->saved_regs.i_control); - - pci_write_config_dword(dev, MRFLD_PCI_PMCS, - isp->saved_regs.pmcs); - pci_write_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, + pci_write_config_word(pdev, PCI_COMMAND, isp->saved_regs.pcicmdsts); + pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, isp->saved_regs.ispmmadr); + pci_write_config_dword(pdev, PCI_MSI_CAPID, isp->saved_regs.msicap); + pci_write_config_dword(pdev, PCI_MSI_ADDR, isp->saved_regs.msi_addr); + pci_write_config_word(pdev, PCI_MSI_DATA, isp->saved_regs.msi_data); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, isp->saved_regs.intr); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, isp->saved_regs.interrupt_control); + pci_write_config_dword(pdev, PCI_I_CONTROL, isp->saved_regs.i_control); + + pci_write_config_dword(pdev, MRFLD_PCI_PMCS, isp->saved_regs.pmcs); + pci_write_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL, isp->saved_regs.csi_access_viol); - pci_write_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL, + pci_write_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL, isp->saved_regs.csi_rcomp_config); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, isp->saved_regs.csi_afe_dly); - pci_write_config_dword(dev, 
MRFLD_PCI_CSI_CONTROL, + pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, isp->saved_regs.csi_control); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL, isp->saved_regs.csi_afe_rcomp_config); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL, + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL, isp->saved_regs.csi_afe_hs_control); - pci_write_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL, + pci_write_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL, isp->saved_regs.csi_deadline_control); /* @@ -619,7 +607,7 @@ static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp) static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp) { - struct pci_dev *dev = isp->pdev; + struct pci_dev *pdev = to_pci_dev(isp->dev); u32 irq; unsigned long flags; @@ -635,11 +623,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp) * So, here we need to check if there is any pending * IRQ, if so, waiting for it to be served */ - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); irq = irq & 1 << INTR_IIR; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); if (!(irq & (1 << INTR_IIR))) goto done; @@ -652,11 +640,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp) spin_unlock_irqrestore(&isp->lock, flags); return -EAGAIN; } else { - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); irq = irq & 1 << INTR_IIR; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); if (!(irq & (1 << INTR_IIR))) { atomisp_css2_hw_store_32(MRFLD_INTR_ENABLE_REG, 0x0); goto done; @@ -675,11 +663,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp) * to IIR. It could block subsequent interrupt messages. * HW sighting:4568410. 
*/ - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); irq &= ~(1 << INTR_IER); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); - atomisp_msi_irq_uninit(isp, dev); + atomisp_msi_irq_uninit(isp); atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true); spin_unlock_irqrestore(&isp->lock, flags); @@ -755,7 +743,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable) /* Wait until ISPSSPM0 bit[25:24] shows the right value */ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &tmp); - tmp = (tmp & MRFLD_ISPSSPM0_ISPSSC_MASK) >> MRFLD_ISPSSPM0_ISPSSS_OFFSET; + tmp = (tmp >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) & MRFLD_ISPSSPM0_ISPSSC_MASK; if (tmp == val) { trace_ipu_cstate(enable); return 0; @@ -778,15 +766,13 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable) /* Workaround for pmu_nc_set_power_state not ready in MRFLD */ int atomisp_mrfld_power_down(struct atomisp_device *isp) { -// FIXME: at least with ISP2401, enabling this code causes the driver to break - return 0 && atomisp_mrfld_power(isp, false); + return atomisp_mrfld_power(isp, false); } /* Workaround for pmu_nc_set_power_state not ready in MRFLD */ int atomisp_mrfld_power_up(struct atomisp_device *isp) { -// FIXME: at least with ISP2401, enabling this code causes the driver to break - return 0 && atomisp_mrfld_power(isp, true); + return atomisp_mrfld_power(isp, true); } int atomisp_runtime_suspend(struct device *dev) @@ -902,6 +888,7 @@ static int __maybe_unused atomisp_resume(struct device *dev) int atomisp_csi_lane_config(struct atomisp_device *isp) { + struct pci_dev *pdev = to_pci_dev(isp->dev); static const struct { u8 code; u8 lanes[MRFLD_PORT_NUM]; @@ -1003,7 +990,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp) return -EINVAL; } - pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &csi_control); + pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &csi_control); csi_control &= ~port_config_mask; csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT) | (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT)) @@ -1013,7 +1000,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp) | (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT) | (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift); - pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, csi_control); + pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, csi_control); dev_dbg(isp->dev, "%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n", @@ -1440,8 +1427,7 @@ atomisp_load_firmware(struct atomisp_device *isp) * Check for flags the driver was compiled with against the PCI * device. Always returns true on other than ISP 2400. 
*/ -static bool is_valid_device(struct pci_dev *dev, - const struct pci_device_id *id) +static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned int a0_max_id = 0; const char *name; @@ -1465,14 +1451,14 @@ static bool is_valid_device(struct pci_dev *dev, name = "Cherrytrail"; break; default: - dev_err(&dev->dev, "%s: unknown device ID %x04:%x04\n", + dev_err(&pdev->dev, "%s: unknown device ID %x04:%x04\n", product, id->vendor, id->device); return false; } - if (dev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) { - dev_err(&dev->dev, "%s revision %d is not unsupported\n", - name, dev->revision); + if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) { + dev_err(&pdev->dev, "%s revision %d is not unsupported\n", + name, pdev->revision); return false; } @@ -1483,22 +1469,20 @@ static bool is_valid_device(struct pci_dev *dev, #if defined(ISP2400) if (IS_ISP2401) { - dev_err(&dev->dev, "Support for %s (ISP2401) was disabled at compile time\n", + dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n", name); return false; } #else if (!IS_ISP2401) { - dev_err(&dev->dev, "Support for %s (ISP2400) was disabled at compile time\n", + dev_err(&pdev->dev, "Support for %s (ISP2400) was disabled at compile time\n", name); return false; } #endif - dev_info(&dev->dev, "Detected %s version %d (ISP240%c) on %s\n", - name, dev->revision, - IS_ISP2401 ? '1' : '0', - product); + dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n", + name, pdev->revision, IS_ISP2401 ? '1' : '0', product); return true; } @@ -1538,66 +1522,60 @@ static int init_atomisp_wdts(struct atomisp_device *isp) #define ATOM_ISP_PCI_BAR 0 -static int atomisp_pci_probe(struct pci_dev *dev, - const struct pci_device_id *id) +static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const struct atomisp_platform_data *pdata; struct atomisp_device *isp; unsigned int start; - void __iomem *base; int err, val; u32 irq; - if (!is_valid_device(dev, id)) + if (!is_valid_device(pdev, id)) return -ENODEV; /* Pointer to struct device. 
*/ - atomisp_dev = &dev->dev; + atomisp_dev = &pdev->dev; pdata = atomisp_get_platform_data(); if (!pdata) - dev_warn(&dev->dev, "no platform data available\n"); + dev_warn(&pdev->dev, "no platform data available\n"); - err = pcim_enable_device(dev); + err = pcim_enable_device(pdev); if (err) { - dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n", - err); + dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", err); return err; } - start = pci_resource_start(dev, ATOM_ISP_PCI_BAR); - dev_dbg(&dev->dev, "start: 0x%x\n", start); + start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR); + dev_dbg(&pdev->dev, "start: 0x%x\n", start); - err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev)); + err = pcim_iomap_regions(pdev, 1 << ATOM_ISP_PCI_BAR, pci_name(pdev)); if (err) { - dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n", - err); + dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err); goto ioremap_fail; } - base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR]; - dev_dbg(&dev->dev, "base: %p\n", base); - - atomisp_io_base = base; - - dev_dbg(&dev->dev, "atomisp_io_base: %p\n", atomisp_io_base); - - isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL); + isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL); if (!isp) { err = -ENOMEM; goto atomisp_dev_alloc_fail; } - isp->pdev = dev; - isp->dev = &dev->dev; + + isp->dev = &pdev->dev; + isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR]; isp->sw_contex.power_state = ATOM_ISP_POWER_UP; isp->saved_regs.ispmmadr = start; + dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base); + rt_mutex_init(&isp->mutex); mutex_init(&isp->streamoff_mutex); spin_lock_init(&isp->lock); /* This is not a true PCI device on SoC, so the delay is not needed. */ - isp->pdev->d3_delay = 0; + pdev->d3_delay = 0; + + pci_set_drvdata(pdev, isp); switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) { case ATOMISP_PCI_DEVICE_SOC_MRFLD: @@ -1648,15 +1626,14 @@ static int atomisp_pci_probe(struct pci_dev *dev, * have specs yet for exactly how it varies. Default to * BYT-CR but let provisioning set it via EFI variable */ - isp->hpll_freq = gmin_get_var_int(&dev->dev, false, "HpllFreq", - HPLL_FREQ_2000MHZ); + isp->hpll_freq = gmin_get_var_int(&pdev->dev, false, "HpllFreq", HPLL_FREQ_2000MHZ); /* * for BYT/CHT we are put isp into D3cold to avoid pci registers access * in power off. Set d3cold_delay to 0 since default 100ms is not * necessary. */ - isp->pdev->d3cold_delay = 0; + pdev->d3cold_delay = 0; break; case ATOMISP_PCI_DEVICE_SOC_ANN: isp->media_dev.hw_revision = ( @@ -1666,7 +1643,7 @@ static int atomisp_pci_probe(struct pci_dev *dev, ATOMISP_HW_REVISION_ISP2401_LEGACY #endif << ATOMISP_HW_REVISION_SHIFT); - isp->media_dev.hw_revision |= isp->pdev->revision < 2 ? + isp->media_dev.hw_revision |= pdev->revision < 2 ? ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0; isp->dfs = &dfs_config_merr; isp->hpll_freq = HPLL_FREQ_1600MHZ; @@ -1679,13 +1656,13 @@ static int atomisp_pci_probe(struct pci_dev *dev, ATOMISP_HW_REVISION_ISP2401_LEGACY #endif << ATOMISP_HW_REVISION_SHIFT); - isp->media_dev.hw_revision |= isp->pdev->revision < 2 ? + isp->media_dev.hw_revision |= pdev->revision < 2 ? 
ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0; isp->dfs = &dfs_config_cht; - isp->pdev->d3cold_delay = 0; + pdev->d3cold_delay = 0; - iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val); + iosf_mbi_read(BT_MBI_UNIT_CCK, MBI_REG_READ, CCK_FUSE_REG_0, &val); switch (val & CCK_FUSE_HPLL_FREQ_MASK) { case 0x00: isp->hpll_freq = HPLL_FREQ_800MHZ; @@ -1698,18 +1675,16 @@ static int atomisp_pci_probe(struct pci_dev *dev, break; default: isp->hpll_freq = HPLL_FREQ_1600MHZ; - dev_warn(isp->dev, - "read HPLL from cck failed. Default to 1600 MHz.\n"); + dev_warn(&pdev->dev, "read HPLL from cck failed. Default to 1600 MHz.\n"); } break; default: - dev_err(&dev->dev, "un-supported IUNIT device\n"); + dev_err(&pdev->dev, "un-supported IUNIT device\n"); err = -ENODEV; goto atomisp_dev_alloc_fail; } - dev_info(&dev->dev, "ISP HPLL frequency base = %d MHz\n", - isp->hpll_freq); + dev_info(&pdev->dev, "ISP HPLL frequency base = %d MHz\n", isp->hpll_freq); isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY; @@ -1718,30 +1693,28 @@ static int atomisp_pci_probe(struct pci_dev *dev, isp->firmware = atomisp_load_firmware(isp); if (!isp->firmware) { err = -ENOENT; - dev_dbg(&dev->dev, "Firmware load failed\n"); + dev_dbg(&pdev->dev, "Firmware load failed\n"); goto load_fw_fail; } - err = sh_css_check_firmware_version(isp->dev, - isp->firmware->data); + err = sh_css_check_firmware_version(isp->dev, isp->firmware->data); if (err) { - dev_dbg(&dev->dev, "Firmware version check failed\n"); + dev_dbg(&pdev->dev, "Firmware version check failed\n"); goto fw_validation_fail; } } else { - dev_info(&dev->dev, "Firmware load will be deferred\n"); + dev_info(&pdev->dev, "Firmware load will be deferred\n"); } - pci_set_master(dev); - pci_set_drvdata(dev, isp); + pci_set_master(pdev); - err = pci_enable_msi(dev); + err = pci_enable_msi(pdev); if (err) { - dev_err(&dev->dev, "Failed to enable msi (%d)\n", err); + dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err); goto enable_msi_fail; } - atomisp_msi_irq_init(isp, dev); + atomisp_msi_irq_init(isp); cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE); @@ -1762,8 +1735,7 @@ static int atomisp_pci_probe(struct pci_dev *dev, * Workaround for imbalance data eye issue which is observed * on TNG B0. */ - pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, - &csi_afe_trim); + pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, &csi_afe_trim); csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) | (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK << @@ -1776,20 +1748,18 @@ static int atomisp_pci_probe(struct pci_dev *dev, MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) | (MRFLD_PCI_CSI3_HSRXCLKTRIM << MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT); - pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, - csi_afe_trim); + pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim); } err = atomisp_initialize_modules(isp); if (err < 0) { - dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err); + dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err); goto initialize_modules_fail; } err = atomisp_register_entities(isp); if (err < 0) { - dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n", - err); + dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err); goto register_entities_fail; } err = atomisp_create_pads_links(isp); @@ -1802,24 +1772,24 @@ static int atomisp_pci_probe(struct pci_dev *dev, /* save the iunit context only once after all the values are init'ed. 
*/ atomisp_save_iunit_reg(isp); - pm_runtime_put_noidle(&dev->dev); - pm_runtime_allow(&dev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(&pdev->dev); hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr); err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED); if (err) { - dev_err(&dev->dev, "Failed to register reserved memory pool.\n"); + dev_err(&pdev->dev, "Failed to register reserved memory pool.\n"); goto hmm_pool_fail; } /* Init ISP memory management */ hmm_init(); - err = devm_request_threaded_irq(&dev->dev, dev->irq, + err = devm_request_threaded_irq(&pdev->dev, pdev->irq, atomisp_isr, atomisp_isr_thread, IRQF_SHARED, "isp_irq", isp); if (err) { - dev_err(&dev->dev, "Failed to request irq (%d)\n", err); + dev_err(&pdev->dev, "Failed to request irq (%d)\n", err); goto request_irq_fail; } @@ -1827,23 +1797,23 @@ static int atomisp_pci_probe(struct pci_dev *dev, if (!defer_fw_load) { err = atomisp_css_load_firmware(isp); if (err) { - dev_err(&dev->dev, "Failed to init css.\n"); + dev_err(&pdev->dev, "Failed to init css.\n"); goto css_init_fail; } } else { - dev_dbg(&dev->dev, "Skip css init.\n"); + dev_dbg(&pdev->dev, "Skip css init.\n"); } /* Clear FW image from memory */ release_firmware(isp->firmware); isp->firmware = NULL; isp->css_env.isp_css_fw.data = NULL; - atomisp_drvfs_init(&dev->driver->driver, isp); + atomisp_drvfs_init(isp); return 0; css_init_fail: - devm_free_irq(&dev->dev, dev->irq, isp); + devm_free_irq(&pdev->dev, pdev->irq, isp); request_irq_fail: hmm_cleanup(); hmm_pool_unregister(HMM_POOL_TYPE_RESERVED); @@ -1856,8 +1826,8 @@ static int atomisp_pci_probe(struct pci_dev *dev, atomisp_uninitialize_modules(isp); initialize_modules_fail: cpu_latency_qos_remove_request(&isp->pm_qos); - atomisp_msi_irq_uninit(isp, dev); - pci_disable_msi(dev); + atomisp_msi_irq_uninit(isp); + pci_disable_msi(pdev); enable_msi_fail: fw_validation_fail: release_firmware(isp->firmware); @@ -1869,35 +1839,34 @@ static int atomisp_pci_probe(struct pci_dev *dev, * The following lines have been copied from atomisp suspend path */ - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); irq = irq & 1 << INTR_IIR; - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); - pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq); + pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq); irq &= ~(1 << INTR_IER); - pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq); + pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq); - atomisp_msi_irq_uninit(isp, dev); + atomisp_msi_irq_uninit(isp); atomisp_ospm_dphy_down(isp); /* Address later when we worry about the ...field chips */ if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power_down(isp)) - dev_err(&dev->dev, "Failed to switch off ISP\n"); + dev_err(&pdev->dev, "Failed to switch off ISP\n"); atomisp_dev_alloc_fail: - pcim_iounmap_regions(dev, 1 << ATOM_ISP_PCI_BAR); + pcim_iounmap_regions(pdev, 1 << ATOM_ISP_PCI_BAR); ioremap_fail: return err; } -static void atomisp_pci_remove(struct pci_dev *dev) +static void atomisp_pci_remove(struct pci_dev *pdev) { - struct atomisp_device *isp = (struct atomisp_device *) - pci_get_drvdata(dev); + struct atomisp_device *isp = pci_get_drvdata(pdev); - dev_info(&dev->dev, "Removing atomisp driver\n"); + dev_info(&pdev->dev, "Removing atomisp driver\n"); atomisp_drvfs_exit(); @@ -1906,11 +1875,11 @@ static void atomisp_pci_remove(struct pci_dev *dev) ia_css_unload_firmware(); 
hmm_cleanup(); - pm_runtime_forbid(&dev->dev); - pm_runtime_get_noresume(&dev->dev); + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); cpu_latency_qos_remove_request(&isp->pm_qos); - atomisp_msi_irq_uninit(isp, dev); + atomisp_msi_irq_uninit(isp); atomisp_unregister_entities(isp); destroy_workqueue(isp->wdt_work_queue); diff --git a/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c index cf02737cf8d4460fced32c89d40f7cc9c3ba8f5d..a9c881631f4a320bfb02f2881ba58bbc737cc192 100644 --- a/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c +++ b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c @@ -48,7 +48,7 @@ static struct ia_css_refcount_entry *refcount_find_entry(ia_css_ptr ptr, return NULL; if (!myrefcount.items) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "refcount_find_entry(): Ref count not initialized!\n"); + "%s(): Ref count not initialized!\n", __func__); return NULL; } @@ -73,12 +73,12 @@ int ia_css_refcount_init(uint32_t size) if (size == 0) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_init(): Size of 0 for Ref count init!\n"); + "%s(): Size of 0 for Ref count init!\n", __func__); return -EINVAL; } if (myrefcount.items) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_init(): Ref count is already initialized\n"); + "%s(): Ref count is already initialized\n", __func__); return -EINVAL; } myrefcount.items = @@ -99,7 +99,7 @@ void ia_css_refcount_uninit(void) u32 i; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_uninit() entry\n"); + "%s() entry\n", __func__); for (i = 0; i < myrefcount.size; i++) { /* driver verifier tool has issues with &arr[i] and prefers arr + i; as these are actually equivalent @@ -120,7 +120,7 @@ void ia_css_refcount_uninit(void) myrefcount.items = NULL; myrefcount.size = 0; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_uninit() leave\n"); + "%s() leave\n", __func__); } ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr) @@ -133,7 +133,7 @@ ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr) entry = refcount_find_entry(ptr, false); ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_increment(%x) 0x%x\n", id, ptr); + "%s(%x) 0x%x\n", __func__, id, ptr); if (!entry) { entry = refcount_find_entry(ptr, true); @@ -145,7 +145,7 @@ ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr) if (entry->id != id) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_refcount_increment(): Ref count IDS do not match!\n"); + "%s(): Ref count IDS do not match!\n", __func__); return mmgr_NULL; } @@ -165,7 +165,7 @@ bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr) struct ia_css_refcount_entry *entry; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_decrement(%x) 0x%x\n", id, ptr); + "%s(%x) 0x%x\n", __func__, id, ptr); if (ptr == mmgr_NULL) return false; @@ -175,7 +175,7 @@ bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr) if (entry) { if (entry->id != id) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, - "ia_css_refcount_decrement(): Ref count IDS do not match!\n"); + "%s(): Ref count IDS do not match!\n", __func__); return false; } if (entry->count > 0) { @@ -225,8 +225,8 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr) u32 count = 0; assert(clear_func_ptr); - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n", - id); + ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x)\n", + __func__, id); for (i = 0; i < myrefcount.size; i++) { 
/* driver verifier tool has issues with &arr[i] @@ -236,14 +236,14 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr) entry = myrefcount.items + i; if ((entry->data != mmgr_NULL) && (entry->id == id)) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_clear: %x: 0x%x\n", + "%s: %x: 0x%x\n", __func__, id, entry->data); if (clear_func_ptr) { /* clear using provided function */ clear_func_ptr(entry->data); } else { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_clear: using hmm_free: no clear_func\n"); + "%s: using hmm_free: no clear_func\n", __func__); hmm_free(entry->data); } @@ -260,7 +260,7 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr) } } ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, - "ia_css_refcount_clear(%x): cleared %d\n", id, + "%s(%x): cleared %d\n", __func__, id, count); } diff --git a/drivers/staging/media/atomisp/pci/hive_types.h b/drivers/staging/media/atomisp/pci/hive_types.h index addda9b81d7bd5044a474143fbfac30fa77f5fc9..4b8a679fb6722da9749dcf085cb8cf303daed96c 100644 --- a/drivers/staging/media/atomisp/pci/hive_types.h +++ b/drivers/staging/media/atomisp/pci/hive_types.h @@ -52,32 +52,14 @@ typedef unsigned short hive_uint16; typedef unsigned int hive_uint32; typedef unsigned long long hive_uint64; -/* by default assume 32 bit master port (both data and address) */ -#ifndef HRT_DATA_WIDTH -#define HRT_DATA_WIDTH 32 -#endif -#ifndef HRT_ADDRESS_WIDTH -#define HRT_ADDRESS_WIDTH 32 -#endif - +#define HRT_DATA_WIDTH 32 +#define HRT_ADDRESS_WIDTH 64 #define HRT_DATA_BYTES (HRT_DATA_WIDTH / 8) #define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH / 8) +#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) -#if HRT_DATA_WIDTH == 64 -typedef hive_uint64 hrt_data; -#elif HRT_DATA_WIDTH == 32 typedef hive_uint32 hrt_data; -#else -#error data width not supported -#endif - -#if HRT_ADDRESS_WIDTH == 64 typedef hive_uint64 hrt_address; -#elif HRT_ADDRESS_WIDTH == 32 -typedef hive_uint32 hrt_address; -#else -#error adddres width not supported -#endif /* use 64 bit addresses in simulation, where possible */ typedef hive_uint64 hive_sim_address; diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c index 42fef17798622f18a3ce0767d6d97d44144de518..2bd39b4939f16d2d3feb5b448285aa3ff6081633 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c @@ -735,11 +735,11 @@ ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr) void hmm_show_mem_stat(const char *func, const int line) { - trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n", - hmm_mem_stat.tol_cnt, - hmm_mem_stat.usr_size, hmm_mem_stat.res_size, - hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, - hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); + pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n", + hmm_mem_stat.tol_cnt, + hmm_mem_stat.usr_size, hmm_mem_stat.res_size, + hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size, + hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size); } void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr) diff --git a/drivers/staging/media/atomisp/pci/isp2400_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_system_global.h index d87ddf1d2fe9b1ebc06b99fb98f6b09ef56fd5c0..74fff465e8e8cb0497e29f3a5a402e61807de4d2 100644 --- a/drivers/staging/media/atomisp/pci/isp2400_system_global.h +++ b/drivers/staging/media/atomisp/pci/isp2400_system_global.h @@ -13,306 +13,4 @@ * more details. 
*/ -#ifndef __SYSTEM_GLOBAL_H_INCLUDED__ -#define __SYSTEM_GLOBAL_H_INCLUDED__ - -#include -#include - -/* - * The longest allowed (uninteruptible) bus transfer, does not - * take stalling into account - */ -#define HIVE_ISP_MAX_BURST_LENGTH 1024 - -/* - * Maximum allowed burst length in words for the ISP DMA - */ -#define ISP_DMA_MAX_BURST_LENGTH 128 - -/* - * Create a list of HAS and IS properties that defines the system - * - * The configuration assumes the following - * - The system is hetereogeneous; Multiple cells and devices classes - * - The cell and device instances are homogeneous, each device type - * belongs to the same class - * - Device instances supporting a subset of the class capabilities are - * allowed - * - * We could manage different device classes through the enumerated - * lists (C) or the use of classes (C++), but that is presently not - * fully supported - * - * N.B. the 3 input formatters are of 2 different classess - */ - #define USE_INPUT_SYSTEM_VERSION_2 - -#define HAS_MMU_VERSION_2 -#define HAS_DMA_VERSION_2 -#define HAS_GDC_VERSION_2 -#define HAS_VAMEM_VERSION_2 -#define HAS_HMEM_VERSION_1 -#define HAS_BAMEM_VERSION_2 -#define HAS_IRQ_VERSION_2 -#define HAS_IRQ_MAP_VERSION_2 -#define HAS_INPUT_FORMATTER_VERSION_2 -/* 2401: HAS_INPUT_SYSTEM_VERSION_2401 */ -#define HAS_INPUT_SYSTEM_VERSION_2 -#define HAS_BUFFERED_SENSOR -#define HAS_FIFO_MONITORS_VERSION_2 -/* #define HAS_GP_REGS_VERSION_2 */ -#define HAS_GP_DEVICE_VERSION_2 -#define HAS_GPIO_VERSION_1 -#define HAS_TIMED_CTRL_VERSION_1 -#define HAS_RX_VERSION_2 - -#define DMA_DDR_TO_VAMEM_WORKAROUND -#define DMA_DDR_TO_HMEM_WORKAROUND - -/* - * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply - */ -#define HRT_VADDRESS_WIDTH 32 -//#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property*/ -#define HRT_DATA_WIDTH 32 - -#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) -#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8) - -/* The main bus connecting all devices */ -#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH -#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES - -/* per-frame parameter handling support */ -#define SH_CSS_ENABLE_PER_FRAME_PARAMS - -typedef u32 hrt_bus_align_t; - -/* - * Enumerate the devices, device access through the API is by ID, through the DLI by address - * The enumerator terminators are used to size the wiring arrays and as an exception value. - */ -typedef enum { - DDR0_ID = 0, - N_DDR_ID -} ddr_ID_t; - -typedef enum { - ISP0_ID = 0, - N_ISP_ID -} isp_ID_t; - -typedef enum { - SP0_ID = 0, - N_SP_ID -} sp_ID_t; - -typedef enum { - MMU0_ID = 0, - MMU1_ID, - N_MMU_ID -} mmu_ID_t; - -typedef enum { - DMA0_ID = 0, - N_DMA_ID -} dma_ID_t; - -typedef enum { - GDC0_ID = 0, - GDC1_ID, - N_GDC_ID -} gdc_ID_t; - -#define N_GDC_ID_CPP 2 // this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums. 
- -typedef enum { - VAMEM0_ID = 0, - VAMEM1_ID, - VAMEM2_ID, - N_VAMEM_ID -} vamem_ID_t; - -typedef enum { - BAMEM0_ID = 0, - N_BAMEM_ID -} bamem_ID_t; - -typedef enum { - HMEM0_ID = 0, - N_HMEM_ID -} hmem_ID_t; - -/* -typedef enum { - IRQ0_ID = 0, - N_IRQ_ID -} irq_ID_t; -*/ - -typedef enum { - IRQ0_ID = 0, // GP IRQ block - IRQ1_ID, // Input formatter - IRQ2_ID, // input system - IRQ3_ID, // input selector - N_IRQ_ID -} irq_ID_t; - -typedef enum { - FIFO_MONITOR0_ID = 0, - N_FIFO_MONITOR_ID -} fifo_monitor_ID_t; - -/* - * Deprecated: Since all gp_reg instances are different - * and put in the address maps of other devices we cannot - * enumerate them as that assumes the instrances are the - * same. - * - * We define a single GP_DEVICE containing all gp_regs - * w.r.t. a single base address - * -typedef enum { - GP_REGS0_ID = 0, - N_GP_REGS_ID -} gp_regs_ID_t; - */ -typedef enum { - GP_DEVICE0_ID = 0, - N_GP_DEVICE_ID -} gp_device_ID_t; - -typedef enum { - GP_TIMER0_ID = 0, - GP_TIMER1_ID, - GP_TIMER2_ID, - GP_TIMER3_ID, - GP_TIMER4_ID, - GP_TIMER5_ID, - GP_TIMER6_ID, - GP_TIMER7_ID, - N_GP_TIMER_ID -} gp_timer_ID_t; - -typedef enum { - GPIO0_ID = 0, - N_GPIO_ID -} gpio_ID_t; - -typedef enum { - TIMED_CTRL0_ID = 0, - N_TIMED_CTRL_ID -} timed_ctrl_ID_t; - -typedef enum { - INPUT_FORMATTER0_ID = 0, - INPUT_FORMATTER1_ID, - INPUT_FORMATTER2_ID, - INPUT_FORMATTER3_ID, - N_INPUT_FORMATTER_ID -} input_formatter_ID_t; - -/* The IF RST is outside the IF */ -#define INPUT_FORMATTER0_SRST_OFFSET 0x0824 -#define INPUT_FORMATTER1_SRST_OFFSET 0x0624 -#define INPUT_FORMATTER2_SRST_OFFSET 0x0424 -#define INPUT_FORMATTER3_SRST_OFFSET 0x0224 - -#define INPUT_FORMATTER0_SRST_MASK 0x0001 -#define INPUT_FORMATTER1_SRST_MASK 0x0002 -#define INPUT_FORMATTER2_SRST_MASK 0x0004 -#define INPUT_FORMATTER3_SRST_MASK 0x0008 - -typedef enum { - INPUT_SYSTEM0_ID = 0, - N_INPUT_SYSTEM_ID -} input_system_ID_t; - -typedef enum { - RX0_ID = 0, - N_RX_ID -} rx_ID_t; - -enum mipi_port_id { - MIPI_PORT0_ID = 0, - MIPI_PORT1_ID, - MIPI_PORT2_ID, - N_MIPI_PORT_ID -}; - -#define N_RX_CHANNEL_ID 4 - -/* Generic port enumeration with an internal port type ID */ -typedef enum { - CSI_PORT0_ID = 0, - CSI_PORT1_ID, - CSI_PORT2_ID, - TPG_PORT0_ID, - PRBS_PORT0_ID, - FIFO_PORT0_ID, - MEMORY_PORT0_ID, - N_INPUT_PORT_ID -} input_port_ID_t; - -typedef enum { - CAPTURE_UNIT0_ID = 0, - CAPTURE_UNIT1_ID, - CAPTURE_UNIT2_ID, - ACQUISITION_UNIT0_ID, - DMA_UNIT0_ID, - CTRL_UNIT0_ID, - GPREGS_UNIT0_ID, - FIFO_UNIT0_ID, - IRQ_UNIT0_ID, - N_SUB_SYSTEM_ID -} sub_system_ID_t; - -#define N_CAPTURE_UNIT_ID 3 -#define N_ACQUISITION_UNIT_ID 1 -#define N_CTRL_UNIT_ID 1 - -enum ia_css_isp_memories { - IA_CSS_ISP_PMEM0 = 0, - IA_CSS_ISP_DMEM0, - IA_CSS_ISP_VMEM0, - IA_CSS_ISP_VAMEM0, - IA_CSS_ISP_VAMEM1, - IA_CSS_ISP_VAMEM2, - IA_CSS_ISP_HMEM0, - IA_CSS_SP_DMEM0, - IA_CSS_DDR, - N_IA_CSS_MEMORIES -}; - -#define IA_CSS_NUM_MEMORIES 9 -/* For driver compatibility */ -#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES -#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES - -#if 0 -typedef enum { - dev_chn, /* device channels, external resource */ - ext_mem, /* external memories */ - int_mem, /* internal memories */ - int_chn /* internal channels, user defined */ -} resource_type_t; - -/* if this enum is extended with other memory resources, pls also extend the function resource_to_memptr() */ -typedef enum { - vied_nci_dev_chn_dma_ext0, - int_mem_vmem0, - int_mem_dmem0 -} resource_id_t; - -/* enum listing the different memories within a program group. 
- This enum is used in the mem_ptr_t type */ -typedef enum { - buf_mem_invalid = 0, - buf_mem_vmem_prog0, - buf_mem_dmem_prog0 -} buf_mem_t; - -#endif -#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/isp2400_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_system_local.h deleted file mode 100644 index 675b8e5bdcc1d7ddad52d26a81756162f4d15dab..0000000000000000000000000000000000000000 --- a/drivers/staging/media/atomisp/pci/isp2400_system_local.h +++ /dev/null @@ -1,321 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2010-2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#ifndef __SYSTEM_LOCAL_H_INCLUDED__ -#define __SYSTEM_LOCAL_H_INCLUDED__ - -#ifdef HRT_ISP_CSS_CUSTOM_HOST -#ifndef HRT_USE_VIR_ADDRS -#define HRT_USE_VIR_ADDRS -#endif -#endif - -#include "system_global.h" - -/* HRT assumes 32 by default (see Linux/include/hive_types.h), overrule it in case it is different */ -#undef HRT_ADDRESS_WIDTH -#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */ - -/* This interface is deprecated */ -#include "hive_types.h" - -/* - * Cell specific address maps - */ -#if HRT_ADDRESS_WIDTH == 64 - -#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ - -/* DDR */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - (hrt_address)0x0000000120000000ULL -}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - (hrt_address)0x0000000000020000ULL -}; - -static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - (hrt_address)0x0000000000200000ULL -}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - (hrt_address)0x0000000000100000ULL -}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - (hrt_address)0x00000000001C0000ULL, - (hrt_address)0x00000000001D0000ULL, - (hrt_address)0x00000000001E0000ULL -}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - (hrt_address)0x00000000001F0000ULL -}; - -/* SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - (hrt_address)0x0000000000010000ULL -}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - (hrt_address)0x0000000000300000ULL -}; - -static const hrt_address SP_PMEM_BASE[N_SP_ID] = { - (hrt_address)0x00000000000B0000ULL -}; - -/* MMU */ -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - (hrt_address)0x0000000000070000ULL, - (hrt_address)0x00000000000A0000ULL -}; - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - (hrt_address)0x0000000000040000ULL -}; - -/* IRQ */ -static const hrt_address IRQ_BASE[N_IRQ_ID] = { - (hrt_address)0x0000000000000500ULL, - (hrt_address)0x0000000000030A00ULL, - (hrt_address)0x000000000008C000ULL, - (hrt_address)0x0000000000090200ULL -}; - -/* - (hrt_address)0x0000000000000500ULL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] = { - (hrt_address)0x0000000000050000ULL, - (hrt_address)0x0000000000060000ULL -}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address 
FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - (hrt_address)0x0000000000000000ULL -}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - (hrt_address)0x0000000000000000ULL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x0000000000090000ULL}; -*/ - -/* GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x0000000000000000ULL -}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x0000000000000600ULL; -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - (hrt_address)0x0000000000000400ULL -}; - -/* TIMED_CTRL */ -static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - (hrt_address)0x0000000000000100ULL -}; - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - (hrt_address)0x0000000000030000ULL, - (hrt_address)0x0000000000030200ULL, - (hrt_address)0x0000000000030400ULL, - (hrt_address)0x0000000000030600ULL -}; /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - (hrt_address)0x0000000000080000ULL -}; - -/* (hrt_address)0x0000000000081000ULL, */ /* capture A */ -/* (hrt_address)0x0000000000082000ULL, */ /* capture B */ -/* (hrt_address)0x0000000000083000ULL, */ /* capture C */ -/* (hrt_address)0x0000000000084000ULL, */ /* Acquisition */ -/* (hrt_address)0x0000000000085000ULL, */ /* DMA */ -/* (hrt_address)0x0000000000089000ULL, */ /* ctrl */ -/* (hrt_address)0x000000000008A000ULL, */ /* GP regs */ -/* (hrt_address)0x000000000008B000ULL, */ /* FIFO */ -/* (hrt_address)0x000000000008C000ULL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address RX_BASE[N_RX_ID] = { - (hrt_address)0x0000000000080100ULL -}; - -#elif HRT_ADDRESS_WIDTH == 32 - -#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */ - -/* DDR : Attention, this value not defined in 32-bit */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - (hrt_address)0x00000000UL -}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - (hrt_address)0x00020000UL -}; - -static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - (hrt_address)0x00200000UL -}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - (hrt_address)0x100000UL -}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - (hrt_address)0xffffffffUL, - (hrt_address)0xffffffffUL, - (hrt_address)0xffffffffUL -}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - (hrt_address)0xffffffffUL -}; - -/* SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - (hrt_address)0x00010000UL -}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - (hrt_address)0x00300000UL -}; - -static const hrt_address SP_PMEM_BASE[N_SP_ID] = { - (hrt_address)0x000B0000UL -}; - -/* MMU */ -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - (hrt_address)0x00070000UL, - (hrt_address)0x000A0000UL -}; - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - (hrt_address)0x00040000UL -}; - -/* IRQ */ -static const hrt_address IRQ_BASE[N_IRQ_ID] = { - (hrt_address)0x00000500UL, - (hrt_address)0x00030A00UL, - (hrt_address)0x0008C000UL, - (hrt_address)0x00090200UL -}; - -/* - (hrt_address)0x00000500UL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] 
= { - (hrt_address)0x00050000UL, - (hrt_address)0x00060000UL -}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - (hrt_address)0x00000000UL -}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - (hrt_address)0x00000000UL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x00090000UL}; -*/ - -/* GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - (hrt_address)0x00000000UL -}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x00000600UL; - -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - (hrt_address)0x00000400UL -}; - -/* TIMED_CTRL */ -static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - (hrt_address)0x00000100UL -}; - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - (hrt_address)0x00030000UL, - (hrt_address)0x00030200UL, - (hrt_address)0x00030400UL -}; - -/* (hrt_address)0x00030600UL, */ /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - (hrt_address)0x00080000UL -}; - -/* (hrt_address)0x00081000UL, */ /* capture A */ -/* (hrt_address)0x00082000UL, */ /* capture B */ -/* (hrt_address)0x00083000UL, */ /* capture C */ -/* (hrt_address)0x00084000UL, */ /* Acquisition */ -/* (hrt_address)0x00085000UL, */ /* DMA */ -/* (hrt_address)0x00089000UL, */ /* ctrl */ -/* (hrt_address)0x0008A000UL, */ /* GP regs */ -/* (hrt_address)0x0008B000UL, */ /* FIFO */ -/* (hrt_address)0x0008C000UL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address RX_BASE[N_RX_ID] = { - (hrt_address)0x00080100UL -}; - -#else -#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}" -#endif - -#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/isp2401_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_system_global.h index 8bb2a956f983efb54ce4865d4ce6c6cb787a7e7a..27cd2535bab806fa998b82aa827918f6d473caeb 100644 --- a/drivers/staging/media/atomisp/pci/isp2401_system_global.h +++ b/drivers/staging/media/atomisp/pci/isp2401_system_global.h @@ -13,415 +13,7 @@ * more details. */ -#ifndef __SYSTEM_GLOBAL_H_INCLUDED__ -#define __SYSTEM_GLOBAL_H_INCLUDED__ - -#include -#include - -/* - * The longest allowed (uninteruptible) bus transfer, does not - * take stalling into account - */ -#define HIVE_ISP_MAX_BURST_LENGTH 1024 - -/* - * Maximum allowed burst length in words for the ISP DMA - * This value is set to 2 to prevent the ISP DMA from blocking - * the bus for too long; as the input system can only buffer - * 2 lines on Moorefield and Cherrytrail, the input system buffers - * may overflow if blocked for too long (BZ 2726). 
- */ -#define ISP_DMA_MAX_BURST_LENGTH 2 - -/* - * Create a list of HAS and IS properties that defines the system - * - * The configuration assumes the following - * - The system is hetereogeneous; Multiple cells and devices classes - * - The cell and device instances are homogeneous, each device type - * belongs to the same class - * - Device instances supporting a subset of the class capabilities are - * allowed - * - * We could manage different device classes through the enumerated - * lists (C) or the use of classes (C++), but that is presently not - * fully supported - * - * N.B. the 3 input formatters are of 2 different classess - */ - +#define HAS_NO_INPUT_FORMATTER #define USE_INPUT_SYSTEM_VERSION_2401 - -#define HAS_MMU_VERSION_2 -#define HAS_DMA_VERSION_2 -#define HAS_GDC_VERSION_2 -#define HAS_VAMEM_VERSION_2 -#define HAS_HMEM_VERSION_1 -#define HAS_BAMEM_VERSION_2 -#define HAS_IRQ_VERSION_2 -#define HAS_IRQ_MAP_VERSION_2 -#define HAS_INPUT_FORMATTER_VERSION_2 -/* 2401: HAS_INPUT_SYSTEM_VERSION_3 */ -/* 2400: HAS_INPUT_SYSTEM_VERSION_2 */ -#define HAS_INPUT_SYSTEM_VERSION_2 #define HAS_INPUT_SYSTEM_VERSION_2401 -#define HAS_BUFFERED_SENSOR -#define HAS_FIFO_MONITORS_VERSION_2 -/* #define HAS_GP_REGS_VERSION_2 */ -#define HAS_GP_DEVICE_VERSION_2 -#define HAS_GPIO_VERSION_1 -#define HAS_TIMED_CTRL_VERSION_1 -#define HAS_RX_VERSION_2 -#define HAS_NO_INPUT_FORMATTER -/*#define HAS_NO_PACKED_RAW_PIXELS*/ -/*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/ - -#define DMA_DDR_TO_VAMEM_WORKAROUND -#define DMA_DDR_TO_HMEM_WORKAROUND - -/* - * Semi global. "HRT" is accessible from SP, but - * the HRT types do not fully apply - */ -#define HRT_VADDRESS_WIDTH 32 -/* Surprise, this is a local property*/ -/*#define HRT_ADDRESS_WIDTH 64 */ -#define HRT_DATA_WIDTH 32 - -#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) -#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8) - -/* The main bus connecting all devices */ -#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH -#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES - #define CSI2P_DISABLE_ISYS2401_ONLINE_MODE - -/* per-frame parameter handling support */ -#define SH_CSS_ENABLE_PER_FRAME_PARAMS - -typedef u32 hrt_bus_align_t; - -/* - * Enumerate the devices, device access through the API is by ID, - * through the DLI by address. The enumerator terminators are used - * to size the wiring arrays and as an exception value. - */ -typedef enum { - DDR0_ID = 0, - N_DDR_ID -} ddr_ID_t; - -typedef enum { - ISP0_ID = 0, - N_ISP_ID -} isp_ID_t; - -typedef enum { - SP0_ID = 0, - N_SP_ID -} sp_ID_t; - -typedef enum { - MMU0_ID = 0, - MMU1_ID, - N_MMU_ID -} mmu_ID_t; - -typedef enum { - DMA0_ID = 0, - N_DMA_ID -} dma_ID_t; - -typedef enum { - GDC0_ID = 0, - GDC1_ID, - N_GDC_ID -} gdc_ID_t; - -/* this extra define is needed because we want to use it also - in the preprocessor, and that doesn't work with enums. 
- */ -#define N_GDC_ID_CPP 2 - -typedef enum { - VAMEM0_ID = 0, - VAMEM1_ID, - VAMEM2_ID, - N_VAMEM_ID -} vamem_ID_t; - -typedef enum { - BAMEM0_ID = 0, - N_BAMEM_ID -} bamem_ID_t; - -typedef enum { - HMEM0_ID = 0, - N_HMEM_ID -} hmem_ID_t; - -typedef enum { - ISYS_IRQ0_ID = 0, /* port a */ - ISYS_IRQ1_ID, /* port b */ - ISYS_IRQ2_ID, /* port c */ - N_ISYS_IRQ_ID -} isys_irq_ID_t; - -typedef enum { - IRQ0_ID = 0, /* GP IRQ block */ - IRQ1_ID, /* Input formatter */ - IRQ2_ID, /* input system */ - IRQ3_ID, /* input selector */ - N_IRQ_ID -} irq_ID_t; - -typedef enum { - FIFO_MONITOR0_ID = 0, - N_FIFO_MONITOR_ID -} fifo_monitor_ID_t; - -/* - * Deprecated: Since all gp_reg instances are different - * and put in the address maps of other devices we cannot - * enumerate them as that assumes the instrances are the - * same. - * - * We define a single GP_DEVICE containing all gp_regs - * w.r.t. a single base address - * -typedef enum { - GP_REGS0_ID = 0, - N_GP_REGS_ID -} gp_regs_ID_t; - */ -typedef enum { - GP_DEVICE0_ID = 0, - N_GP_DEVICE_ID -} gp_device_ID_t; - -typedef enum { - GP_TIMER0_ID = 0, - GP_TIMER1_ID, - GP_TIMER2_ID, - GP_TIMER3_ID, - GP_TIMER4_ID, - GP_TIMER5_ID, - GP_TIMER6_ID, - GP_TIMER7_ID, - N_GP_TIMER_ID -} gp_timer_ID_t; - -typedef enum { - GPIO0_ID = 0, - N_GPIO_ID -} gpio_ID_t; - -typedef enum { - TIMED_CTRL0_ID = 0, - N_TIMED_CTRL_ID -} timed_ctrl_ID_t; - -typedef enum { - INPUT_FORMATTER0_ID = 0, - INPUT_FORMATTER1_ID, - INPUT_FORMATTER2_ID, - INPUT_FORMATTER3_ID, - N_INPUT_FORMATTER_ID -} input_formatter_ID_t; - -/* The IF RST is outside the IF */ -#define INPUT_FORMATTER0_SRST_OFFSET 0x0824 -#define INPUT_FORMATTER1_SRST_OFFSET 0x0624 -#define INPUT_FORMATTER2_SRST_OFFSET 0x0424 -#define INPUT_FORMATTER3_SRST_OFFSET 0x0224 - -#define INPUT_FORMATTER0_SRST_MASK 0x0001 -#define INPUT_FORMATTER1_SRST_MASK 0x0002 -#define INPUT_FORMATTER2_SRST_MASK 0x0004 -#define INPUT_FORMATTER3_SRST_MASK 0x0008 - -typedef enum { - INPUT_SYSTEM0_ID = 0, - N_INPUT_SYSTEM_ID -} input_system_ID_t; - -typedef enum { - RX0_ID = 0, - N_RX_ID -} rx_ID_t; - -enum mipi_port_id { - MIPI_PORT0_ID = 0, - MIPI_PORT1_ID, - MIPI_PORT2_ID, - N_MIPI_PORT_ID -}; - -#define N_RX_CHANNEL_ID 4 - -/* Generic port enumeration with an internal port type ID */ -typedef enum { - CSI_PORT0_ID = 0, - CSI_PORT1_ID, - CSI_PORT2_ID, - TPG_PORT0_ID, - PRBS_PORT0_ID, - FIFO_PORT0_ID, - MEMORY_PORT0_ID, - N_INPUT_PORT_ID -} input_port_ID_t; - -typedef enum { - CAPTURE_UNIT0_ID = 0, - CAPTURE_UNIT1_ID, - CAPTURE_UNIT2_ID, - ACQUISITION_UNIT0_ID, - DMA_UNIT0_ID, - CTRL_UNIT0_ID, - GPREGS_UNIT0_ID, - FIFO_UNIT0_ID, - IRQ_UNIT0_ID, - N_SUB_SYSTEM_ID -} sub_system_ID_t; - -#define N_CAPTURE_UNIT_ID 3 -#define N_ACQUISITION_UNIT_ID 1 -#define N_CTRL_UNIT_ID 1 - -/* - * Input-buffer Controller. - */ -typedef enum { - IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */ - IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */ - IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */ - N_IBUF_CTRL_ID -} ibuf_ctrl_ID_t; -/* end of Input-buffer Controller */ - -/* - * Stream2MMIO. - */ -typedef enum { - STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */ - STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */ - STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */ - N_STREAM2MMIO_ID -} stream2mmio_ID_t; - -typedef enum { - /* - * Stream2MMIO 0 has 8 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID]. - * - * Stream2MMIO 1 has 4 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID]. 
- * - * Stream2MMIO 2 has 4 SIDs that are indexed by - * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID]. - */ - STREAM2MMIO_SID0_ID = 0, - STREAM2MMIO_SID1_ID, - STREAM2MMIO_SID2_ID, - STREAM2MMIO_SID3_ID, - STREAM2MMIO_SID4_ID, - STREAM2MMIO_SID5_ID, - STREAM2MMIO_SID6_ID, - STREAM2MMIO_SID7_ID, - N_STREAM2MMIO_SID_ID -} stream2mmio_sid_ID_t; -/* end of Stream2MMIO */ - -/** - * Input System 2401: CSI-MIPI recevier. - */ -typedef enum { - CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */ - CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */ - CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */ - N_CSI_RX_BACKEND_ID -} csi_rx_backend_ID_t; - -typedef enum { - CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */ - CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */ - CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */ -#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1) -} csi_rx_frontend_ID_t; - -typedef enum { - CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */ - CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */ - CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */ - CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */ - N_CSI_RX_DLANE_ID -} csi_rx_fe_dlane_ID_t; -/* end of CSI-MIPI receiver */ - -typedef enum { - ISYS2401_DMA0_ID = 0, - N_ISYS2401_DMA_ID -} isys2401_dma_ID_t; - -/** - * Pixel-generator. ("system_global.h") - */ -typedef enum { - PIXELGEN0_ID = 0, - PIXELGEN1_ID, - PIXELGEN2_ID, - N_PIXELGEN_ID -} pixelgen_ID_t; -/* end of pixel-generator. ("system_global.h") */ - -typedef enum { - INPUT_SYSTEM_CSI_PORT0_ID = 0, - INPUT_SYSTEM_CSI_PORT1_ID, - INPUT_SYSTEM_CSI_PORT2_ID, - - INPUT_SYSTEM_PIXELGEN_PORT0_ID, - INPUT_SYSTEM_PIXELGEN_PORT1_ID, - INPUT_SYSTEM_PIXELGEN_PORT2_ID, - - N_INPUT_SYSTEM_INPUT_PORT_ID -} input_system_input_port_ID_t; - -#define N_INPUT_SYSTEM_CSI_PORT 3 - -typedef enum { - ISYS2401_DMA_CHANNEL_0 = 0, - ISYS2401_DMA_CHANNEL_1, - ISYS2401_DMA_CHANNEL_2, - ISYS2401_DMA_CHANNEL_3, - ISYS2401_DMA_CHANNEL_4, - ISYS2401_DMA_CHANNEL_5, - ISYS2401_DMA_CHANNEL_6, - ISYS2401_DMA_CHANNEL_7, - ISYS2401_DMA_CHANNEL_8, - ISYS2401_DMA_CHANNEL_9, - ISYS2401_DMA_CHANNEL_10, - ISYS2401_DMA_CHANNEL_11, - N_ISYS2401_DMA_CHANNEL -} isys2401_dma_channel; - -enum ia_css_isp_memories { - IA_CSS_ISP_PMEM0 = 0, - IA_CSS_ISP_DMEM0, - IA_CSS_ISP_VMEM0, - IA_CSS_ISP_VAMEM0, - IA_CSS_ISP_VAMEM1, - IA_CSS_ISP_VAMEM2, - IA_CSS_ISP_HMEM0, - IA_CSS_SP_DMEM0, - IA_CSS_DDR, - N_IA_CSS_MEMORIES -}; - -#define IA_CSS_NUM_MEMORIES 9 -/* For driver compatibility */ -#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES -#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES - -#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/isp2401_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_system_local.h deleted file mode 100644 index b09f8faadb13b71ae4cc05a697a757fc909c71ea..0000000000000000000000000000000000000000 --- a/drivers/staging/media/atomisp/pci/isp2401_system_local.h +++ /dev/null @@ -1,402 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Support for Intel Camera Imaging ISP subsystem. - * Copyright (c) 2015, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - */ - -#ifndef __SYSTEM_LOCAL_H_INCLUDED__ -#define __SYSTEM_LOCAL_H_INCLUDED__ - -#ifdef HRT_ISP_CSS_CUSTOM_HOST -#ifndef HRT_USE_VIR_ADDRS -#define HRT_USE_VIR_ADDRS -#endif -#endif - -#include "system_global.h" - -#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */ - -/* This interface is deprecated */ -#include "hive_types.h" - -/* - * Cell specific address maps - */ -#if HRT_ADDRESS_WIDTH == 64 - -#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ - -/* DDR */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - 0x0000000120000000ULL -}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - 0x0000000000020000ULL -}; - -static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - 0x0000000000200000ULL -}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - 0x0000000000100000ULL -}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - 0x00000000001C0000ULL, - 0x00000000001D0000ULL, - 0x00000000001E0000ULL -}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - 0x00000000001F0000ULL -}; - -/* SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - 0x0000000000010000ULL -}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - 0x0000000000300000ULL -}; - -/* MMU */ -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - 0x0000000000070000ULL, - 0x00000000000A0000ULL -}; - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - 0x0000000000040000ULL -}; - -static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { - 0x00000000000CA000ULL -}; - -/* IRQ */ -static const hrt_address IRQ_BASE[N_IRQ_ID] = { - 0x0000000000000500ULL, - 0x0000000000030A00ULL, - 0x000000000008C000ULL, - 0x0000000000090200ULL -}; - -/* - 0x0000000000000500ULL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] = { - 0x0000000000050000ULL, - 0x0000000000060000ULL -}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - 0x0000000000000000ULL -}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - 0x0000000000000000ULL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x0000000000090000ULL}; -*/ - -/* GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x0000000000000000ULL -}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x0000000000000600ULL; - -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - 0x0000000000000400ULL -}; - -/* TIMED_CTRL */ -static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - 0x0000000000000100ULL -}; - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - 0x0000000000030000ULL, - 0x0000000000030200ULL, - 0x0000000000030400ULL, - 0x0000000000030600ULL -}; /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - 0x0000000000080000ULL -}; - -/* 0x0000000000081000ULL, */ /* capture A */ -/* 0x0000000000082000ULL, */ /* capture B */ -/* 0x0000000000083000ULL, */ /* capture C */ -/* 0x0000000000084000ULL, */ /* Acquisition */ -/* 0x0000000000085000ULL, */ /* DMA */ -/* 0x0000000000089000ULL, */ /* ctrl */ -/* 0x000000000008A000ULL, */ /* GP regs */ -/* 
0x000000000008B000ULL, */ /* FIFO */ -/* 0x000000000008C000ULL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address RX_BASE[N_RX_ID] = { - 0x0000000000080100ULL -}; - -/* IBUF_CTRL, part of the Input System 2401 */ -static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = { - 0x00000000000C1800ULL, /* ibuf controller A */ - 0x00000000000C3800ULL, /* ibuf controller B */ - 0x00000000000C5800ULL /* ibuf controller C */ -}; - -/* ISYS IRQ Controllers, part of the Input System 2401 */ -static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { - 0x00000000000C1400ULL, /* port a */ - 0x00000000000C3400ULL, /* port b */ - 0x00000000000C5400ULL /* port c */ -}; - -/* CSI FE, part of the Input System 2401 */ -static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { - 0x00000000000C0400ULL, /* csi fe controller A */ - 0x00000000000C2400ULL, /* csi fe controller B */ - 0x00000000000C4400ULL /* csi fe controller C */ -}; - -/* CSI BE, part of the Input System 2401 */ -static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { - 0x00000000000C0800ULL, /* csi be controller A */ - 0x00000000000C2800ULL, /* csi be controller B */ - 0x00000000000C4800ULL /* csi be controller C */ -}; - -/* PIXEL Generator, part of the Input System 2401 */ -static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { - 0x00000000000C1000ULL, /* pixel gen controller A */ - 0x00000000000C3000ULL, /* pixel gen controller B */ - 0x00000000000C5000ULL /* pixel gen controller C */ -}; - -/* Stream2MMIO, part of the Input System 2401 */ -static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { - 0x00000000000C0C00ULL, /* stream2mmio controller A */ - 0x00000000000C2C00ULL, /* stream2mmio controller B */ - 0x00000000000C4C00ULL /* stream2mmio controller C */ -}; -#elif HRT_ADDRESS_WIDTH == 32 - -#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */ - -/* DDR : Attention, this value not defined in 32-bit */ -static const hrt_address DDR_BASE[N_DDR_ID] = { - 0x00000000UL -}; - -/* ISP */ -static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { - 0x00020000UL -}; - -static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { - 0xffffffffUL -}; - -static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { - 0xffffffffUL -}; - -static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = { - 0xffffffffUL, - 0xffffffffUL, - 0xffffffffUL -}; - -static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = { - 0xffffffffUL -}; - -/* SP */ -static const hrt_address SP_CTRL_BASE[N_SP_ID] = { - 0x00010000UL -}; - -static const hrt_address SP_DMEM_BASE[N_SP_ID] = { - 0x00300000UL -}; - -/* MMU */ -/* - * MMU0_ID: The data MMU - * MMU1_ID: The icache MMU - */ -static const hrt_address MMU_BASE[N_MMU_ID] = { - 0x00070000UL, - 0x000A0000UL -}; - -/* DMA */ -static const hrt_address DMA_BASE[N_DMA_ID] = { - 0x00040000UL -}; - -static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { - 0x000CA000UL -}; - -/* IRQ */ -static const hrt_address IRQ_BASE[N_IRQ_ID] = { - 0x00000500UL, - 0x00030A00UL, - 0x0008C000UL, - 0x00090200UL -}; - -/* - 0x00000500UL}; - */ - -/* GDC */ -static const hrt_address GDC_BASE[N_GDC_ID] = { - 0x00050000UL, - 0x00060000UL -}; - -/* FIFO_MONITOR (not a subset of GP_DEVICE) */ -static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { - 0x00000000UL -}; - -/* -static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { - 0x00000000UL}; - -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x00090000UL}; -*/ - -/* 
GP_DEVICE (single base for all separate GP_REG instances) */ -static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { - 0x00000000UL -}; - -/*GP TIMER , all timer registers are inter-twined, - * so, having multiple base addresses for - * different timers does not help*/ -static const hrt_address GP_TIMER_BASE = - (hrt_address)0x00000600UL; -/* GPIO */ -static const hrt_address GPIO_BASE[N_GPIO_ID] = { - 0x00000400UL -}; - -/* TIMED_CTRL */ -static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { - 0x00000100UL -}; - -/* INPUT_FORMATTER */ -static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { - 0x00030000UL, - 0x00030200UL, - 0x00030400UL -}; - -/* 0x00030600UL, */ /* memcpy() */ - -/* INPUT_SYSTEM */ -static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { - 0x00080000UL -}; - -/* 0x00081000UL, */ /* capture A */ -/* 0x00082000UL, */ /* capture B */ -/* 0x00083000UL, */ /* capture C */ -/* 0x00084000UL, */ /* Acquisition */ -/* 0x00085000UL, */ /* DMA */ -/* 0x00089000UL, */ /* ctrl */ -/* 0x0008A000UL, */ /* GP regs */ -/* 0x0008B000UL, */ /* FIFO */ -/* 0x0008C000UL, */ /* IRQ */ - -/* RX, the MIPI lane control regs start at offset 0 */ -static const hrt_address RX_BASE[N_RX_ID] = { - 0x00080100UL -}; - -/* IBUF_CTRL, part of the Input System 2401 */ -static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = { - 0x000C1800UL, /* ibuf controller A */ - 0x000C3800UL, /* ibuf controller B */ - 0x000C5800UL /* ibuf controller C */ -}; - -/* ISYS IRQ Controllers, part of the Input System 2401 */ -static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { - 0x000C1400ULL, /* port a */ - 0x000C3400ULL, /* port b */ - 0x000C5400ULL /* port c */ -}; - -/* CSI FE, part of the Input System 2401 */ -static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { - 0x000C0400UL, /* csi fe controller A */ - 0x000C2400UL, /* csi fe controller B */ - 0x000C4400UL /* csi fe controller C */ -}; - -/* CSI BE, part of the Input System 2401 */ -static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { - 0x000C0800UL, /* csi be controller A */ - 0x000C2800UL, /* csi be controller B */ - 0x000C4800UL /* csi be controller C */ -}; - -/* PIXEL Generator, part of the Input System 2401 */ -static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { - 0x000C1000UL, /* pixel gen controller A */ - 0x000C3000UL, /* pixel gen controller B */ - 0x000C5000UL /* pixel gen controller C */ -}; - -/* Stream2MMIO, part of the Input System 2401 */ -static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { - 0x000C0C00UL, /* stream2mmio controller A */ - 0x000C2C00UL, /* stream2mmio controller B */ - 0x000C4C00UL /* stream2mmio controller C */ -}; - -#else -#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}" -#endif - -#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index 6676537f0e970ac16dd83e29da19bcf0424c898e..54434c2dbaf904d8037b8f709452ede91ee8e744 100644 --- a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -1841,8 +1841,13 @@ ia_css_init(struct device *dev, const struct ia_css_env *env, #endif #if !defined(HAS_NO_INPUT_SYSTEM) - dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, - ISP_DMA_MAX_BURST_LENGTH); + + if (!IS_ISP2401) + dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, + ISP2400_DMA_MAX_BURST_LENGTH); + else + dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN, + 
ISP2401_DMA_MAX_BURST_LENGTH); if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR) err = -EINVAL; diff --git a/drivers/staging/media/atomisp/pci/system_global.h b/drivers/staging/media/atomisp/pci/system_global.h index 16d0a2e9a4dc7b07b06a1df3bd7de03dedd1cc92..90210f6943d22013b4ab3742cc86962ff2d9766f 100644 --- a/drivers/staging/media/atomisp/pci/system_global.h +++ b/drivers/staging/media/atomisp/pci/system_global.h @@ -4,8 +4,403 @@ * (c) 2020 Mauro Carvalho Chehab */ +#ifndef __SYSTEM_GLOBAL_H_INCLUDED__ +#define __SYSTEM_GLOBAL_H_INCLUDED__ + +/* + * Create a list of HAS and IS properties that defines the system + * Those are common for both ISP2400 and ISP2401 + * + * The configuration assumes the following + * - The system is hetereogeneous; Multiple cells and devices classes + * - The cell and device instances are homogeneous, each device type + * belongs to the same class + * - Device instances supporting a subset of the class capabilities are + * allowed + * + * We could manage different device classes through the enumerated + * lists (C) or the use of classes (C++), but that is presently not + * fully supported + * + * N.B. the 3 input formatters are of 2 different classess + */ + +#define HAS_MMU_VERSION_2 +#define HAS_DMA_VERSION_2 +#define HAS_GDC_VERSION_2 +#define HAS_VAMEM_VERSION_2 +#define HAS_HMEM_VERSION_1 +#define HAS_BAMEM_VERSION_2 +#define HAS_IRQ_VERSION_2 +#define HAS_IRQ_MAP_VERSION_2 +#define HAS_INPUT_FORMATTER_VERSION_2 +#define HAS_INPUT_SYSTEM_VERSION_2 +#define HAS_BUFFERED_SENSOR +#define HAS_FIFO_MONITORS_VERSION_2 +#define HAS_GP_DEVICE_VERSION_2 +#define HAS_GPIO_VERSION_1 +#define HAS_TIMED_CTRL_VERSION_1 +#define HAS_RX_VERSION_2 + +/* per-frame parameter handling support */ +#define SH_CSS_ENABLE_PER_FRAME_PARAMS + +#define DMA_DDR_TO_VAMEM_WORKAROUND +#define DMA_DDR_TO_HMEM_WORKAROUND + +/* + * The longest allowed (uninteruptible) bus transfer, does not + * take stalling into account + */ +#define HIVE_ISP_MAX_BURST_LENGTH 1024 + +/* + * Maximum allowed burst length in words for the ISP DMA + * This value is set to 2 to prevent the ISP DMA from blocking + * the bus for too long; as the input system can only buffer + * 2 lines on Moorefield and Cherrytrail, the input system buffers + * may overflow if blocked for too long (BZ 2726). + */ +#define ISP2400_DMA_MAX_BURST_LENGTH 128 +#define ISP2401_DMA_MAX_BURST_LENGTH 2 + #ifdef ISP2401 # include "isp2401_system_global.h" #else # include "isp2400_system_global.h" #endif + +#include +#include + +/* This interface is deprecated */ +#include "hive_types.h" + +/* + * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply + */ +#define HRT_VADDRESS_WIDTH 32 + +#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3) +#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8) + +/* The main bus connecting all devices */ +#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH +#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES + +typedef u32 hrt_bus_align_t; + +/* + * Enumerate the devices, device access through the API is by ID, + * through the DLI by address. The enumerator terminators are used + * to size the wiring arrays and as an exception value. 
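
The burst-length split visible above (ISP2400_DMA_MAX_BURST_LENGTH versus ISP2401_DMA_MAX_BURST_LENGTH, selected at runtime in sh_css.c) can be reduced to the following user-space mock-up; dma_set_max_burst_size_mock() is a stand-in for the driver's dma_set_max_burst_size(), not its real signature:

#include <stdbool.h>
#include <stdio.h>

#define ISP2400_DMA_MAX_BURST_LENGTH	128
#define ISP2401_DMA_MAX_BURST_LENGTH	2

/* Stand-in for the driver's dma_set_max_burst_size(). */
static void dma_set_max_burst_size_mock(int dma_id, int connection,
					unsigned int burst_len)
{
	printf("dma%d/conn%d: max burst = %u words\n",
	       dma_id, connection, burst_len);
}

static void init_dma_burst(bool is_isp2401)
{
	/*
	 * Same shape as the sh_css.c hunk: the 2401 input system can only
	 * buffer two lines, so its DMA bursts are capped much lower.
	 */
	if (!is_isp2401)
		dma_set_max_burst_size_mock(0, 0, ISP2400_DMA_MAX_BURST_LENGTH);
	else
		dma_set_max_burst_size_mock(0, 0, ISP2401_DMA_MAX_BURST_LENGTH);
}

int main(void)
{
	init_dma_burst(false);	/* ISP2400 path */
	init_dma_burst(true);	/* ISP2401 path */
	return 0;
}
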
+ */ +typedef enum { + DDR0_ID = 0, + N_DDR_ID +} ddr_ID_t; + +typedef enum { + ISP0_ID = 0, + N_ISP_ID +} isp_ID_t; + +typedef enum { + SP0_ID = 0, + N_SP_ID +} sp_ID_t; + +typedef enum { + MMU0_ID = 0, + MMU1_ID, + N_MMU_ID +} mmu_ID_t; + +typedef enum { + DMA0_ID = 0, + N_DMA_ID +} dma_ID_t; + +typedef enum { + GDC0_ID = 0, + GDC1_ID, + N_GDC_ID +} gdc_ID_t; + +/* this extra define is needed because we want to use it also + in the preprocessor, and that doesn't work with enums. + */ +#define N_GDC_ID_CPP 2 + +typedef enum { + VAMEM0_ID = 0, + VAMEM1_ID, + VAMEM2_ID, + N_VAMEM_ID +} vamem_ID_t; + +typedef enum { + BAMEM0_ID = 0, + N_BAMEM_ID +} bamem_ID_t; + +typedef enum { + HMEM0_ID = 0, + N_HMEM_ID +} hmem_ID_t; + +typedef enum { + IRQ0_ID = 0, /* GP IRQ block */ + IRQ1_ID, /* Input formatter */ + IRQ2_ID, /* input system */ + IRQ3_ID, /* input selector */ + N_IRQ_ID +} irq_ID_t; + +typedef enum { + FIFO_MONITOR0_ID = 0, + N_FIFO_MONITOR_ID +} fifo_monitor_ID_t; + +typedef enum { + GP_DEVICE0_ID = 0, + N_GP_DEVICE_ID +} gp_device_ID_t; + +typedef enum { + GP_TIMER0_ID = 0, + GP_TIMER1_ID, + GP_TIMER2_ID, + GP_TIMER3_ID, + GP_TIMER4_ID, + GP_TIMER5_ID, + GP_TIMER6_ID, + GP_TIMER7_ID, + N_GP_TIMER_ID +} gp_timer_ID_t; + +typedef enum { + GPIO0_ID = 0, + N_GPIO_ID +} gpio_ID_t; + +typedef enum { + TIMED_CTRL0_ID = 0, + N_TIMED_CTRL_ID +} timed_ctrl_ID_t; + +typedef enum { + INPUT_FORMATTER0_ID = 0, + INPUT_FORMATTER1_ID, + INPUT_FORMATTER2_ID, + INPUT_FORMATTER3_ID, + N_INPUT_FORMATTER_ID +} input_formatter_ID_t; + +/* The IF RST is outside the IF */ +#define INPUT_FORMATTER0_SRST_OFFSET 0x0824 +#define INPUT_FORMATTER1_SRST_OFFSET 0x0624 +#define INPUT_FORMATTER2_SRST_OFFSET 0x0424 +#define INPUT_FORMATTER3_SRST_OFFSET 0x0224 + +#define INPUT_FORMATTER0_SRST_MASK 0x0001 +#define INPUT_FORMATTER1_SRST_MASK 0x0002 +#define INPUT_FORMATTER2_SRST_MASK 0x0004 +#define INPUT_FORMATTER3_SRST_MASK 0x0008 + +typedef enum { + INPUT_SYSTEM0_ID = 0, + N_INPUT_SYSTEM_ID +} input_system_ID_t; + +typedef enum { + RX0_ID = 0, + N_RX_ID +} rx_ID_t; + +enum mipi_port_id { + MIPI_PORT0_ID = 0, + MIPI_PORT1_ID, + MIPI_PORT2_ID, + N_MIPI_PORT_ID +}; + +#define N_RX_CHANNEL_ID 4 + +/* Generic port enumeration with an internal port type ID */ +typedef enum { + CSI_PORT0_ID = 0, + CSI_PORT1_ID, + CSI_PORT2_ID, + TPG_PORT0_ID, + PRBS_PORT0_ID, + FIFO_PORT0_ID, + MEMORY_PORT0_ID, + N_INPUT_PORT_ID +} input_port_ID_t; + +typedef enum { + CAPTURE_UNIT0_ID = 0, + CAPTURE_UNIT1_ID, + CAPTURE_UNIT2_ID, + ACQUISITION_UNIT0_ID, + DMA_UNIT0_ID, + CTRL_UNIT0_ID, + GPREGS_UNIT0_ID, + FIFO_UNIT0_ID, + IRQ_UNIT0_ID, + N_SUB_SYSTEM_ID +} sub_system_ID_t; + +#define N_CAPTURE_UNIT_ID 3 +#define N_ACQUISITION_UNIT_ID 1 +#define N_CTRL_UNIT_ID 1 + + +enum ia_css_isp_memories { + IA_CSS_ISP_PMEM0 = 0, + IA_CSS_ISP_DMEM0, + IA_CSS_ISP_VMEM0, + IA_CSS_ISP_VAMEM0, + IA_CSS_ISP_VAMEM1, + IA_CSS_ISP_VAMEM2, + IA_CSS_ISP_HMEM0, + IA_CSS_SP_DMEM0, + IA_CSS_DDR, + N_IA_CSS_MEMORIES +}; + +#define IA_CSS_NUM_MEMORIES 9 +/* For driver compatibility */ +#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES +#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES + +/* + * ISP2401 specific enums + */ + +typedef enum { + ISYS_IRQ0_ID = 0, /* port a */ + ISYS_IRQ1_ID, /* port b */ + ISYS_IRQ2_ID, /* port c */ + N_ISYS_IRQ_ID +} isys_irq_ID_t; + + +/* + * Input-buffer Controller. 
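
The enumerations above all end in an N_* terminator because, as the header comment says, those terminators size the wiring arrays and double as exception values. A standalone sketch of that pattern, reusing the MMU base addresses from the 64-bit map in system_local.c:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hrt_address;

typedef enum {
	MMU0_ID = 0,	/* the data MMU */
	MMU1_ID,	/* the icache MMU */
	N_MMU_ID	/* terminator: array size and "invalid" value */
} mmu_ID_t;

/* Wiring table sized by the terminator (values as in system_local.c). */
static const hrt_address MMU_BASE[N_MMU_ID] = {
	0x0000000000070000ULL,
	0x00000000000A0000ULL
};

int main(void)
{
	/* The terminator also bounds iteration over all instances. */
	for (int id = MMU0_ID; id < N_MMU_ID; id++)
		printf("MMU%d base: 0x%llx\n", id,
		       (unsigned long long)MMU_BASE[id]);
	return 0;
}
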
+ */ +typedef enum { + IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */ + IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */ + IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */ + N_IBUF_CTRL_ID +} ibuf_ctrl_ID_t; +/* end of Input-buffer Controller */ + +/* + * Stream2MMIO. + */ +typedef enum { + STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */ + STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */ + STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */ + N_STREAM2MMIO_ID +} stream2mmio_ID_t; + +typedef enum { + /* + * Stream2MMIO 0 has 8 SIDs that are indexed by + * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID]. + * + * Stream2MMIO 1 has 4 SIDs that are indexed by + * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID]. + * + * Stream2MMIO 2 has 4 SIDs that are indexed by + * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID]. + */ + STREAM2MMIO_SID0_ID = 0, + STREAM2MMIO_SID1_ID, + STREAM2MMIO_SID2_ID, + STREAM2MMIO_SID3_ID, + STREAM2MMIO_SID4_ID, + STREAM2MMIO_SID5_ID, + STREAM2MMIO_SID6_ID, + STREAM2MMIO_SID7_ID, + N_STREAM2MMIO_SID_ID +} stream2mmio_sid_ID_t; +/* end of Stream2MMIO */ + +/** + * Input System 2401: CSI-MIPI recevier. + */ +typedef enum { + CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */ + CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */ + CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */ + N_CSI_RX_BACKEND_ID +} csi_rx_backend_ID_t; + +typedef enum { + CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */ + CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */ + CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */ +#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1) +} csi_rx_frontend_ID_t; + +typedef enum { + CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */ + CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */ + CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */ + CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */ + N_CSI_RX_DLANE_ID +} csi_rx_fe_dlane_ID_t; +/* end of CSI-MIPI receiver */ + +typedef enum { + ISYS2401_DMA0_ID = 0, + N_ISYS2401_DMA_ID +} isys2401_dma_ID_t; + +/** + * Pixel-generator. ("system_global.h") + */ +typedef enum { + PIXELGEN0_ID = 0, + PIXELGEN1_ID, + PIXELGEN2_ID, + N_PIXELGEN_ID +} pixelgen_ID_t; +/* end of pixel-generator. ("system_global.h") */ + +typedef enum { + INPUT_SYSTEM_CSI_PORT0_ID = 0, + INPUT_SYSTEM_CSI_PORT1_ID, + INPUT_SYSTEM_CSI_PORT2_ID, + + INPUT_SYSTEM_PIXELGEN_PORT0_ID, + INPUT_SYSTEM_PIXELGEN_PORT1_ID, + INPUT_SYSTEM_PIXELGEN_PORT2_ID, + + N_INPUT_SYSTEM_INPUT_PORT_ID +} input_system_input_port_ID_t; + +#define N_INPUT_SYSTEM_CSI_PORT 3 + +typedef enum { + ISYS2401_DMA_CHANNEL_0 = 0, + ISYS2401_DMA_CHANNEL_1, + ISYS2401_DMA_CHANNEL_2, + ISYS2401_DMA_CHANNEL_3, + ISYS2401_DMA_CHANNEL_4, + ISYS2401_DMA_CHANNEL_5, + ISYS2401_DMA_CHANNEL_6, + ISYS2401_DMA_CHANNEL_7, + ISYS2401_DMA_CHANNEL_8, + ISYS2401_DMA_CHANNEL_9, + ISYS2401_DMA_CHANNEL_10, + ISYS2401_DMA_CHANNEL_11, + N_ISYS2401_DMA_CHANNEL +} isys2401_dma_channel; + +#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */ diff --git a/drivers/staging/media/atomisp/pci/system_local.c b/drivers/staging/media/atomisp/pci/system_local.c new file mode 100644 index 0000000000000000000000000000000000000000..4ca8569d7feb932d122093e745d6cd9244741410 --- /dev/null +++ b/drivers/staging/media/atomisp/pci/system_local.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2015, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "system_local.h" + +/* ISP */ +const hrt_address ISP_CTRL_BASE[N_ISP_ID] = { + 0x0000000000020000ULL +}; + +const hrt_address ISP_DMEM_BASE[N_ISP_ID] = { + 0x0000000000200000ULL +}; + +const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = { + 0x0000000000100000ULL +}; + +/* SP */ +const hrt_address SP_CTRL_BASE[N_SP_ID] = { + 0x0000000000010000ULL +}; + +const hrt_address SP_DMEM_BASE[N_SP_ID] = { + 0x0000000000300000ULL +}; + +/* MMU */ +/* + * MMU0_ID: The data MMU + * MMU1_ID: The icache MMU + */ +const hrt_address MMU_BASE[N_MMU_ID] = { + 0x0000000000070000ULL, + 0x00000000000A0000ULL +}; + +/* DMA */ +const hrt_address DMA_BASE[N_DMA_ID] = { + 0x0000000000040000ULL +}; + +const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = { + 0x00000000000CA000ULL +}; + +/* IRQ */ +const hrt_address IRQ_BASE[N_IRQ_ID] = { + 0x0000000000000500ULL, + 0x0000000000030A00ULL, + 0x000000000008C000ULL, + 0x0000000000090200ULL +}; + +/* + 0x0000000000000500ULL}; + */ + +/* GDC */ +const hrt_address GDC_BASE[N_GDC_ID] = { + 0x0000000000050000ULL, + 0x0000000000060000ULL +}; + +/* FIFO_MONITOR (not a subset of GP_DEVICE) */ +const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = { + 0x0000000000000000ULL +}; + +/* +const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = { + 0x0000000000000000ULL}; + +const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { + 0x0000000000090000ULL}; +*/ + +/* GP_DEVICE (single base for all separate GP_REG instances) */ +const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = { + 0x0000000000000000ULL +}; + +/*GP TIMER , all timer registers are inter-twined, + * so, having multiple base addresses for + * different timers does not help*/ +const hrt_address GP_TIMER_BASE = + (hrt_address)0x0000000000000600ULL; + +/* GPIO */ +const hrt_address GPIO_BASE[N_GPIO_ID] = { + 0x0000000000000400ULL +}; + +/* TIMED_CTRL */ +const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = { + 0x0000000000000100ULL +}; + +/* INPUT_FORMATTER */ +const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = { + 0x0000000000030000ULL, + 0x0000000000030200ULL, + 0x0000000000030400ULL, + 0x0000000000030600ULL +}; /* memcpy() */ + +/* INPUT_SYSTEM */ +const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = { + 0x0000000000080000ULL +}; + +/* 0x0000000000081000ULL, */ /* capture A */ +/* 0x0000000000082000ULL, */ /* capture B */ +/* 0x0000000000083000ULL, */ /* capture C */ +/* 0x0000000000084000ULL, */ /* Acquisition */ +/* 0x0000000000085000ULL, */ /* DMA */ +/* 0x0000000000089000ULL, */ /* ctrl */ +/* 0x000000000008A000ULL, */ /* GP regs */ +/* 0x000000000008B000ULL, */ /* FIFO */ +/* 0x000000000008C000ULL, */ /* IRQ */ + +/* RX, the MIPI lane control regs start at offset 0 */ +const hrt_address RX_BASE[N_RX_ID] = { + 0x0000000000080100ULL +}; + +/* IBUF_CTRL, part of the Input System 2401 */ +const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = { + 0x00000000000C1800ULL, /* ibuf controller A */ + 0x00000000000C3800ULL, /* ibuf controller B */ + 0x00000000000C5800ULL /* ibuf controller C */ +}; + +/* ISYS IRQ Controllers, part of the Input System 2401 */ 
+const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = { + 0x00000000000C1400ULL, /* port a */ + 0x00000000000C3400ULL, /* port b */ + 0x00000000000C5400ULL /* port c */ +}; + +/* CSI FE, part of the Input System 2401 */ +const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = { + 0x00000000000C0400ULL, /* csi fe controller A */ + 0x00000000000C2400ULL, /* csi fe controller B */ + 0x00000000000C4400ULL /* csi fe controller C */ +}; + +/* CSI BE, part of the Input System 2401 */ +const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = { + 0x00000000000C0800ULL, /* csi be controller A */ + 0x00000000000C2800ULL, /* csi be controller B */ + 0x00000000000C4800ULL /* csi be controller C */ +}; + +/* PIXEL Generator, part of the Input System 2401 */ +const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = { + 0x00000000000C1000ULL, /* pixel gen controller A */ + 0x00000000000C3000ULL, /* pixel gen controller B */ + 0x00000000000C5000ULL /* pixel gen controller C */ +}; + +/* Stream2MMIO, part of the Input System 2401 */ +const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = { + 0x00000000000C0C00ULL, /* stream2mmio controller A */ + 0x00000000000C2C00ULL, /* stream2mmio controller B */ + 0x00000000000C4C00ULL /* stream2mmio controller C */ +}; diff --git a/drivers/staging/media/atomisp/pci/system_local.h b/drivers/staging/media/atomisp/pci/system_local.h index dfcc4c2b8f16ced1c919dcc4df4490036f987013..a47258c2e8a87137822799e32b7d4240d30effee 100644 --- a/drivers/staging/media/atomisp/pci/system_local.h +++ b/drivers/staging/media/atomisp/pci/system_local.h @@ -1,11 +1,103 @@ /* SPDX-License-Identifier: GPL-2.0 */ -// SPDX-License-Identifier: GPL-2.0-or-later /* - * (c) 2020 Mauro Carvalho Chehab + * Support for Intel Camera Imaging ISP subsystem. + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
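
The refactor being carried out here replaces per-variant headers full of static const address tables with extern declarations in system_local.h and one set of definitions in system_local.c, so each translation unit stops carrying its own copy of the data. A compressed single-file sketch of that split (GPIO_BASE only; the header and source halves are marked by comments):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hrt_address;

#define N_GPIO_ID 1

/*
 * What system_local.h now carries: a declaration only, so every .c file
 * that includes it refers to one shared object instead of owning a
 * private static const copy of the table.
 */
extern const hrt_address GPIO_BASE[N_GPIO_ID];

/* What system_local.c now carries: the single definition. */
const hrt_address GPIO_BASE[N_GPIO_ID] = {
	0x0000000000000400ULL
};

int main(void)
{
	printf("GPIO0 base: 0x%llx\n", (unsigned long long)GPIO_BASE[0]);
	return 0;
}
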
*/ -#ifdef ISP2401 -# include "isp2401_system_local.h" -#else -# include "isp2400_system_local.h" +#ifndef __SYSTEM_LOCAL_H_INCLUDED__ +#define __SYSTEM_LOCAL_H_INCLUDED__ + +#ifdef HRT_ISP_CSS_CUSTOM_HOST +#ifndef HRT_USE_VIR_ADDRS +#define HRT_USE_VIR_ADDRS +#endif #endif + +#include "system_global.h" + +/* This interface is deprecated */ +#include "hive_types.h" + +/* + * Cell specific address maps + */ + +#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */ + +/* ISP */ +extern const hrt_address ISP_CTRL_BASE[N_ISP_ID]; +extern const hrt_address ISP_DMEM_BASE[N_ISP_ID]; +extern const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID]; + +/* SP */ +extern const hrt_address SP_CTRL_BASE[N_SP_ID]; +extern const hrt_address SP_DMEM_BASE[N_SP_ID]; + +/* MMU */ + +extern const hrt_address MMU_BASE[N_MMU_ID]; + +/* DMA */ +extern const hrt_address DMA_BASE[N_DMA_ID]; +extern const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID]; + +/* IRQ */ +extern const hrt_address IRQ_BASE[N_IRQ_ID]; + +/* GDC */ +extern const hrt_address GDC_BASE[N_GDC_ID]; + +/* FIFO_MONITOR (not a subset of GP_DEVICE) */ +extern const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID]; + +/* GP_DEVICE (single base for all separate GP_REG instances) */ +extern const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID]; + +/*GP TIMER , all timer registers are inter-twined, + * so, having multiple base addresses for + * different timers does not help*/ +extern const hrt_address GP_TIMER_BASE; + +/* GPIO */ +extern const hrt_address GPIO_BASE[N_GPIO_ID]; + +/* TIMED_CTRL */ +extern const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID]; + +/* INPUT_FORMATTER */ +extern const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID]; + +/* INPUT_SYSTEM */ +extern const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID]; + +/* RX, the MIPI lane control regs start at offset 0 */ +extern const hrt_address RX_BASE[N_RX_ID]; + +/* IBUF_CTRL, part of the Input System 2401 */ +extern const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID]; + +/* ISYS IRQ Controllers, part of the Input System 2401 */ +extern const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID]; + +/* CSI FE, part of the Input System 2401 */ +extern const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID]; + +/* CSI BE, part of the Input System 2401 */ +extern const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID]; + +/* PIXEL Generator, part of the Input System 2401 */ +extern const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID]; + +/* Stream2MMIO, part of the Input System 2401 */ +extern const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID]; + +#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */ diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 4689b2170e4fa3d01de25c695c1ae5980e6f8d92..456603fd26c0b9892d902927a8776c5cc9db8b03 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,11 +61,25 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - + const struct usb_endpoint_descriptor *epd; + const struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; + if (iface_desc->desc.bNumEndpoints != 2) { + result = -ENODEV; + goto failed; + } + + result = -EINVAL; + epd = &iface_desc->endpoint[1].desc; + if (!usb_endpoint_is_bulk_in(epd)) + goto failed; + epd = &iface_desc->endpoint[2].desc; + if (!usb_endpoint_is_bulk_out(epd)) + goto failed; 
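
The probe-time check just added to prism2usb follows a common USB driver pattern: verify that the interface exposes the endpoint layout the driver hard-codes before any endpoint is touched. A simplified user-space sketch of that pattern; the struct fields mimic the USB descriptor format, but the indices checked here are illustrative and not necessarily the driver's own:

#include <stdbool.h>
#include <stdio.h>

#define USB_DIR_IN		0x80	/* bit 7 of bEndpointAddress: IN */
#define USB_XFER_TYPE_MASK	0x03	/* low bits of bmAttributes */
#define USB_XFER_BULK		0x02

struct ep_desc {
	unsigned char bEndpointAddress;
	unsigned char bmAttributes;
};

struct iface_desc {
	unsigned char bNumEndpoints;
	const struct ep_desc *endpoint;
};

static bool ep_is_bulk_in(const struct ep_desc *ep)
{
	return (ep->bmAttributes & USB_XFER_TYPE_MASK) == USB_XFER_BULK &&
	       (ep->bEndpointAddress & USB_DIR_IN);
}

static bool ep_is_bulk_out(const struct ep_desc *ep)
{
	return (ep->bmAttributes & USB_XFER_TYPE_MASK) == USB_XFER_BULK &&
	       !(ep->bEndpointAddress & USB_DIR_IN);
}

/* Reject anything that does not match the expected two-endpoint layout. */
static int validate_layout(const struct iface_desc *iface)
{
	if (iface->bNumEndpoints != 2)
		return -1;	/* -ENODEV in the driver */
	if (!ep_is_bulk_in(&iface->endpoint[0]) ||
	    !ep_is_bulk_out(&iface->endpoint[1]))
		return -2;	/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	const struct ep_desc eps[] = {
		{ .bEndpointAddress = 0x81, .bmAttributes = USB_XFER_BULK },
		{ .bEndpointAddress = 0x02, .bmAttributes = USB_XFER_BULK },
	};
	const struct iface_desc iface = { .bNumEndpoints = 2, .endpoint = eps };

	printf("layout %s\n", validate_layout(&iface) ? "rejected" : "accepted");
	return 0;
}
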
+ dev = interface_to_usbdev(interface); wlandev = create_wlan(); if (!wlandev) { diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 0b3a6265584359c25f3666b00b37100f5e706d60..12448ccd27f16806f715dcd211ba0b426b47396d 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -216,11 +216,16 @@ static int int3400_thermal_run_osc(acpi_handle handle, acpi_status status; int result = 0; struct acpi_osc_context context = { - .uuid_str = int3400_thermal_uuids[uuid], + .uuid_str = NULL, .rev = 1, .cap.length = 8, }; + if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID) + return -EINVAL; + + context.uuid_str = int3400_thermal_uuids[uuid]; + buf[OSC_QUERY_DWORD] = 0; buf[OSC_SUPPORT_DWORD] = enable; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index f86cbb125e2ff31281baeac3bda7c022450240b9..ec1d58c4ceaae1e7155941b40696f94f4fe854e8 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -74,7 +74,7 @@ static void int3403_notify(acpi_handle handle, THERMAL_TRIP_CHANGED); break; default: - dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); + dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); break; } } diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c index 6b7ef1993d7e23dca2c801f1fd6215266c1ebdc4..42c9cd0e5f7754c5c7087224c9cc6227d967e3fe 100644 --- a/drivers/thermal/mtk_thermal.c +++ b/drivers/thermal/mtk_thermal.c @@ -594,8 +594,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank) u32 raw; for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) { - raw = readl(mt->thermal_base + - conf->msr[conf->bank_data[bank->id].sensors[i]]); + raw = readl(mt->thermal_base + conf->msr[i]); temp = raw_to_mcelsius(mt, conf->bank_data[bank->id].sensors[i], @@ -736,8 +735,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num, for (i = 0; i < conf->bank_data[num].num_sensors; i++) writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]], - mt->thermal_base + - conf->adcpnp[conf->bank_data[num].sensors[i]]); + mt->thermal_base + conf->adcpnp[i]); writel((1 << conf->bank_data[num].num_sensors) - 1, controller_base + TEMP_MONCTL0); diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index dbe90bcf4ad42a86096a13359f472cc6f38293e9..c144ca9b032ce0ede441ae1c8afb73285cb129ed 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -913,21 +913,21 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down) * case. 
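
The int3400 change above is a plain bounds check: reject the UUID index before it is used to subscript int3400_thermal_uuids. The same guard in a standalone sketch (table contents and size are placeholders):

#include <errno.h>
#include <stdio.h>

#define MAXIMUM_UUID 3	/* stand-in for INT3400_THERMAL_MAXIMUM_UUID */

static const char * const uuid_table[MAXIMUM_UUID] = {
	"uuid-a", "uuid-b", "uuid-c"	/* placeholder strings */
};

static int run_osc(int uuid)
{
	const char *uuid_str;

	/* Validate the index before it is ever used as a subscript. */
	if (uuid < 0 || uuid >= MAXIMUM_UUID)
		return -EINVAL;

	uuid_str = uuid_table[uuid];
	printf("evaluating _OSC for %s\n", uuid_str);
	return 0;
}

int main(void)
{
	run_osc(1);	/* valid index: looked up */
	return run_osc(7) == -EINVAL ? 0 : 1;	/* out of range: rejected */
}
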
*/ path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, - &tunnel->dst_port, "USB3 Up"); + &tunnel->dst_port, "USB3 Down"); if (!path) { /* Just disable the downstream port */ tb_usb3_port_enable(down, false); goto err_free; } - tunnel->paths[TB_USB3_PATH_UP] = path; - tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); + tunnel->paths[TB_USB3_PATH_DOWN] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, - "USB3 Down"); + "USB3 Up"); if (!path) goto err_deactivate; - tunnel->paths[TB_USB3_PATH_DOWN] = path; - tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); + tunnel->paths[TB_USB3_PATH_UP] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); /* Validate that the tunnel is complete */ if (!tb_port_is_usb3_up(tunnel->dst_port)) { diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index fc118f6498877f5559b51b1e52900976ab989839..cae61d1ebec5a98ef57caf242e9b3626e8bd0ca6 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -524,6 +524,7 @@ static void __init serial8250_isa_init_ports(void) */ up->mcr_mask = ~ALPHA_KLUDGE_MCR; up->mcr_force = ALPHA_KLUDGE_MCR; + serial8250_set_defaults(up); } /* chain base port ops to support Remote Supervisor Adapter */ @@ -547,7 +548,6 @@ static void __init serial8250_isa_init_ports(void) port->membase = old_serial_port[i].iomem_base; port->iotype = old_serial_port[i].io_type; port->regshift = old_serial_port[i].iomem_reg_shift; - serial8250_set_defaults(up); port->irqflags |= irqflag; if (serial8250_isa_config != NULL) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index ddb6aeb76dc53a8c41ca5d9f7833ce2eefea400f..04b9af7ed94153dac7e045bce7d8e11132f3e057 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -326,7 +326,17 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p) * devices will export them as GPIOs, so we pre-configure them safely * as inputs. */ - u8 dir = pcidev->vendor == PCI_VENDOR_ID_EXAR ? 0xff : 0x00; + + u8 dir = 0x00; + + if ((pcidev->vendor == PCI_VENDOR_ID_EXAR) && + (pcidev->subsystem_vendor != PCI_VENDOR_ID_SEALEVEL)) { + // Configure GPIO as inputs for Commtech adapters + dir = 0xff; + } else { + // Configure GPIO as outputs for SeaLevel adapters + dir = 0x00; + } writeb(0x00, p + UART_EXAR_MPIOINT_7_0); writeb(0x00, p + UART_EXAR_MPIOLVL_7_0); diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index f839380c2f4c159a1ca617f555d8d45a5c1393e8..98b8a3e30733bde03c94eace68e3fcf31a94e2e8 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -306,8 +306,21 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, } #endif + /* + * Store the requested baud rate before calling the generic 8250 + * set_termios method. Standard 8250 port expects bauds to be + * no higher than (uartclk / 16) so the baud will be clamped if it + * gets out of that bound. Mediatek 8250 port supports speed + * higher than that, therefore we'll get original baud rate back + * after calling the generic set_termios method and recalculate + * the speed later in this method. 
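
The comment added to mtk8250_set_termios() describes a save-and-restore dance: the generic 8250 code clamps the baud rate to uartclk / 16, so the requested rate is stashed beforehand and written back afterwards for the high-speed path. A user-space sketch of that pattern; the structs and the generic_set_termios() stand-in are assumptions, not the serial core's real types:

#include <stdio.h>

struct mock_port {
	unsigned int uartclk;
};

struct mock_termios {
	unsigned int baud;
};

/* Stand-in for serial8250_do_set_termios(): clamps like the generic code. */
static void generic_set_termios(struct mock_port *port,
				struct mock_termios *termios)
{
	unsigned int max = port->uartclk / 16;

	if (termios->baud > max)
		termios->baud = max;
}

static void mtk_like_set_termios(struct mock_port *port,
				 struct mock_termios *termios)
{
	/* Remember the requested rate before the generic path clamps it. */
	unsigned int baud = termios->baud;

	generic_set_termios(port, termios);

	/* Restore it: this hardware can run faster than uartclk / 16. */
	termios->baud = baud;
}

int main(void)
{
	struct mock_port port = { .uartclk = 26000000 };
	struct mock_termios t = { .baud = 3000000 };

	mtk_like_set_termios(&port, &t);
	printf("baud after set_termios: %u\n", t.baud);	/* 3000000, not 1625000 */
	return 0;
}
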
+ */ + baud = tty_termios_baud_rate(termios); + serial8250_do_set_termios(port, termios, old); + tty_termios_encode_baud_rate(termios, baud, baud); + /* * Mediatek UARTs use an extra highspeed register (MTK_UART_HIGHS) * @@ -339,6 +352,11 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, */ spin_lock_irqsave(&port->lock, flags); + /* + * Update the per-port timeout. + */ + uart_update_timeout(port, termios->c_cflag, baud); + /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index a04f74d2e854d7d35fd72c11461c7631e58ccc50..4df47d02b34b40581649310a40e8a91e098a83ee 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c @@ -1215,7 +1215,12 @@ static int cpm_uart_init_port(struct device_node *np, pinfo->gpios[i] = NULL; - gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS); + gpiod = devm_gpiod_get_index_optional(dev, NULL, i, GPIOD_ASIS); + + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + goto out_irq; + } if (gpiod) { if (i == GPIO_RTS || i == GPIO_DTR) @@ -1237,6 +1242,8 @@ static int cpm_uart_init_port(struct device_node *np, return cpm_uart_request_port(&pinfo->port); +out_irq: + irq_dispose_mapping(pinfo->port.irq); out_pram: cpm_uart_unmap_pram(pinfo, pram); out_mem: diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index b4f835e7de23410a5f2fb0a659e844b1f637b6a8..b784323a6a7b03ed7350733668b0f4980caaee19 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -1698,21 +1698,21 @@ static int mxs_auart_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; - goto out_disable_clks; + goto out_iounmap; } s->port.irq = irq; ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s); if (ret) - goto out_disable_clks; + goto out_iounmap; platform_set_drvdata(pdev, s); ret = mxs_auart_init_gpios(s, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); - goto out_disable_clks; + goto out_iounmap; } /* @@ -1720,7 +1720,7 @@ static int mxs_auart_probe(struct platform_device *pdev) */ ret = mxs_auart_request_gpio_irq(s); if (ret) - goto out_disable_clks; + goto out_iounmap; auart_port[s->port.line] = s; @@ -1746,6 +1746,9 @@ static int mxs_auart_probe(struct platform_device *pdev) mxs_auart_free_gpio_irq(s); auart_port[pdev->id] = NULL; +out_iounmap: + iounmap(s->port.membase); + out_disable_clks: if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); @@ -1761,6 +1764,7 @@ static int mxs_auart_remove(struct platform_device *pdev) uart_remove_one_port(&auart_driver, &s->port); auart_port[pdev->id] = NULL; mxs_auart_free_gpio_irq(s); + iounmap(s->port.membase); if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); clk_disable_unprepare(s->clk_ahb); diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 8de8bac9c6c7200fd84530645d79d4d7b6b80eb7..04d1b0807e664c71248ed2f82b574dbbd0b88a33 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -635,7 +635,7 @@ static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup) } static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, - struct tty_port *tty) + struct tty_port *port) { do { char flag = TTY_NORMAL; @@ -653,16 +653,18 @@ static void 
tegra_uart_handle_rx_pio(struct tegra_uart_port *tup, ch = (unsigned char) tegra_uart_read(tup, UART_RX); tup->uport.icount.rx++; - if (!uart_handle_sysrq_char(&tup->uport, ch) && tty) - tty_insert_flip_char(tty, ch, flag); + if (uart_handle_sysrq_char(&tup->uport, ch)) + continue; if (tup->uport.ignore_status_mask & UART_LSR_DR) continue; + + tty_insert_flip_char(port, ch, flag); } while (1); } static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, - struct tty_port *tty, + struct tty_port *port, unsigned int count) { int copied; @@ -672,17 +674,13 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup, return; tup->uport.icount.rx += count; - if (!tty) { - dev_err(tup->uport.dev, "No tty port\n"); - return; - } if (tup->uport.ignore_status_mask & UART_LSR_DR) return; dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys, count, DMA_FROM_DEVICE); - copied = tty_insert_flip_string(tty, + copied = tty_insert_flip_string(port, ((unsigned char *)(tup->rx_dma_buf_virt)), count); if (copied != count) { WARN_ON(1); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 57840cf903881c4e20a5fd22509ee039fa1bb5e4..5f3daabdc916e3eba27bb982b14dde7a4e7fb243 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -41,8 +41,6 @@ static struct lock_class_key port_lock_key; #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) -#define SYSRQ_TIMEOUT (HZ * 5) - static void uart_change_speed(struct tty_struct *tty, struct uart_state *state, struct ktermios *old_termios); static void uart_wait_until_sent(struct tty_struct *tty, int timeout); @@ -1916,6 +1914,12 @@ static inline bool uart_console_enabled(struct uart_port *port) return uart_console(port) && (port->cons->flags & CON_ENABLED); } +static void __uart_port_spin_lock_init(struct uart_port *port) +{ + spin_lock_init(&port->lock); + lockdep_set_class(&port->lock, &port_lock_key); +} + /* * Ensure that the serial console lock is initialised early. * If this port is a console, then the spinlock is already initialised. @@ -1925,8 +1929,7 @@ static inline void uart_port_spin_lock_init(struct uart_port *port) if (uart_console(port)) return; - spin_lock_init(&port->lock); - lockdep_set_class(&port->lock, &port_lock_key); + __uart_port_spin_lock_init(port); } #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL) @@ -2372,6 +2375,13 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, /* Power up port for set_mctrl() */ uart_change_pm(state, UART_PM_STATE_ON); + /* + * If this driver supports console, and it hasn't been + * successfully registered yet, initialise spin lock for it. + */ + if (port->cons && !(port->cons->flags & CON_ENABLED)) + __uart_port_spin_lock_init(port); + /* * Ensure that the modem control lines are de-activated. * keep the DTR setting that is set in uart_set_options() @@ -3163,7 +3173,7 @@ static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on); * Returns false if @ch is out of enabling sequence and should be * handled some other way, true if @ch was consumed. 
*/ -static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) +bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) { int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq); @@ -3186,99 +3196,9 @@ static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) port->sysrq = 0; return true; } -#else -static inline bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch) -{ - return false; -} +EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq); #endif -int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) -{ - if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL)) - return 0; - - if (!port->has_sysrq || !port->sysrq) - return 0; - - if (ch && time_before(jiffies, port->sysrq)) { - if (sysrq_mask()) { - handle_sysrq(ch); - port->sysrq = 0; - return 1; - } - if (uart_try_toggle_sysrq(port, ch)) - return 1; - } - port->sysrq = 0; - - return 0; -} -EXPORT_SYMBOL_GPL(uart_handle_sysrq_char); - -int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) -{ - if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL)) - return 0; - - if (!port->has_sysrq || !port->sysrq) - return 0; - - if (ch && time_before(jiffies, port->sysrq)) { - if (sysrq_mask()) { - port->sysrq_ch = ch; - port->sysrq = 0; - return 1; - } - if (uart_try_toggle_sysrq(port, ch)) - return 1; - } - port->sysrq = 0; - - return 0; -} -EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char); - -void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags) -__releases(&port->lock) -{ - if (port->has_sysrq) { - int sysrq_ch = port->sysrq_ch; - - port->sysrq_ch = 0; - spin_unlock_irqrestore(&port->lock, flags); - if (sysrq_ch) - handle_sysrq(sysrq_ch); - } else { - spin_unlock_irqrestore(&port->lock, flags); - } -} -EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq); - -/* - * We do the SysRQ and SAK checking like this... - */ -int uart_handle_break(struct uart_port *port) -{ - struct uart_state *state = port->state; - - if (port->handle_break) - port->handle_break(port); - - if (port->has_sysrq && uart_console(port)) { - if (!port->sysrq) { - port->sysrq = jiffies + SYSRQ_TIMEOUT; - return 1; - } - port->sysrq = 0; - } - - if (port->flags & UPF_SAK) - do_SAK(state->port.tty); - return 0; -} -EXPORT_SYMBOL_GPL(uart_handle_break); - EXPORT_SYMBOL(uart_write_wakeup); EXPORT_SYMBOL(uart_register_driver); EXPORT_SYMBOL(uart_unregister_driver); @@ -3289,8 +3209,7 @@ EXPORT_SYMBOL(uart_remove_one_port); /** * uart_get_rs485_mode() - retrieve rs485 properties for given uart - * @dev: uart device - * @rs485conf: output parameter + * @port: uart device's target port * * This function implements the device tree binding described in * Documentation/devicetree/bindings/serial/rs485.txt. 
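For context, uart_handle_sysrq_char() remains the entry point drivers call from their RX paths (see the serial-tegra hunk earlier in this section), even as this patch removes the out-of-line helper bodies from serial_core.c. A minimal sketch of that calling pattern, with hypothetical example_uart_* register accessors (illustrative only, not part of any patch in this series):

#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/tty_flip.h>

/* Hypothetical register accessors used only for this sketch. */
unsigned int example_uart_read_lsr(struct uart_port *port);
unsigned int example_uart_read_rx(struct uart_port *port);

static void example_uart_rx_chars(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	unsigned int ch, lsr;

	while ((lsr = example_uart_read_lsr(port)) & UART_LSR_DR) {
		ch = example_uart_read_rx(port);
		port->icount.rx++;

		/* Let the sysrq state machine consume the character first. */
		if (uart_handle_sysrq_char(port, ch))
			continue;

		/* Otherwise hand it to the tty flip buffer. */
		uart_insert_char(port, lsr, UART_LSR_OE, ch, TTY_NORMAL);
	}

	tty_flip_buffer_push(tport);
}

The helper returns nonzero when it consumed the character, so the driver skips inserting it into the flip buffer in that case.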
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index e1179e74a2b89d892f4c8e0edceb3378c6755e5a..204bb68ce3ca91a1d9338658154bd5465109e4d3 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -3301,6 +3301,9 @@ static int sci_probe_single(struct platform_device *dev, sciport->port.flags |= UPF_HARD_FLOW; } + if (sci_uart_driver.cons->index == sciport->port.line) + spin_lock_init(&sciport->port.lock); + ret = uart_add_one_port(&sci_uart_driver, &sciport->port); if (ret) { sci_cleanup_single(sciport); diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index b9d672af8b655d60428a43788c2cd56d763d34d4..2833f1418d6d9570ac9ce4f1e3449055e09ddd93 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1465,7 +1465,6 @@ static int cdns_uart_probe(struct platform_device *pdev) cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE cdns_uart_uart_driver.cons = &cdns_uart_console; - cdns_uart_console.index = id; #endif rc = uart_register_driver(&cdns_uart_uart_driver); @@ -1581,8 +1580,10 @@ static int cdns_uart_probe(struct platform_device *pdev) * If register_console() don't assign value, then console_port pointer * is cleanup. */ - if (!console_port) + if (!console_port) { + cdns_uart_console.index = id; console_port = port; + } #endif rc = uart_add_one_port(&cdns_uart_uart_driver, port); @@ -1595,8 +1596,10 @@ static int cdns_uart_probe(struct platform_device *pdev) #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE /* This is not port which is used for console that's why clean it up */ if (console_port == port && - !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) { console_port = NULL; + cdns_uart_console.index = -1; + } #endif cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 48a8199f7845dff3d4cefd892db27257962d18a6..42d8c67a481f03c654850c56557419079bc71e79 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1092,10 +1092,19 @@ static const struct tty_port_operations vc_port_ops = { .destruct = vc_port_destruct, }; +/* + * Change # of rows and columns (0 means unchanged/the size of fg_console) + * [this is to be used together with some user program + * like resize that changes the hardware videomode] + */ +#define VC_MAXCOL (32767) +#define VC_MAXROW (32767) + int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; + int err; WARN_CONSOLE_UNLOCKED(); @@ -1125,6 +1134,11 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ if (!*vc->vc_uni_pagedir_loc) con_set_default_unimap(vc); + err = -EINVAL; + if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || + vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) + goto err_free; + err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; @@ -1143,7 +1157,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; - return -ENOMEM; + return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, @@ -1158,14 +1172,6 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, return err; } -/* - * Change # of rows and columns (0 means unchanged/the size of fg_console) - * [this is to be used together with 
some user program - * like resize that changes the hardware videomode] - */ -#define VC_RESIZE_MAXCOL (32767) -#define VC_RESIZE_MAXROW (32767) - /** * vc_do_resize - resizing method for the tty * @tty: tty being resized @@ -1201,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, user = vc->vc_resize_user; vc->vc_resize_user = 0; - if (cols > VC_RESIZE_MAXCOL || lines > VC_RESIZE_MAXROW) + if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? cols : vc->vc_cols); @@ -1212,7 +1218,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) return 0; - if (new_screen_size > KMALLOC_MAX_SIZE) + if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) @@ -3393,6 +3399,7 @@ static int __init con_init(void) INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, 1); + /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, vc->vc_rows, vc->vc_cols, currcons || !vc->vc_sw->con_save_screen); diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c index ae319ef3a832b4807250bcb732a90f94af0b3b29..b60173bc93cef1357ec228936366fc6c1275777c 100644 --- a/drivers/uio/uio_pdrv_genirq.c +++ b/drivers/uio/uio_pdrv_genirq.c @@ -159,9 +159,9 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev) priv->pdev = pdev; if (!uioinfo->irq) { - ret = platform_get_irq(pdev, 0); + ret = platform_get_irq_optional(pdev, 0); uioinfo->irq = ret; - if (ret == -ENXIO && pdev->dev.of_node) + if (ret == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index 633c52de3bb36fd9495d378843ae47379d812468..9865750bc31e0aa9dee451d403d758f76ac20c0d 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -486,7 +486,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status) c67x00_release_urb(c67x00, urb); usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb); spin_unlock(&c67x00->lock); - usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status); + usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status); spin_lock(&c67x00->lock); } diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index 61ec5bb2b0ca351ca1e69481feada828f1a29fad..5aa69980e7ff535b527d2a082fa5f9d1c1e4cd2a 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -37,18 +37,18 @@ static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev, struct cdns3_usb_regs __iomem *regs = priv_dev->regs; struct cdns3_endpoint *priv_ep = priv_dev->eps[0]; - priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr); - priv_ep->trb_pool[0].length = TRB_LEN(length); + priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); + priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length)); if (zlp) { - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL); - priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr); - priv_ep->trb_pool[1].length = TRB_LEN(0); - priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC | - TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL)); + priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr)); + priv_ep->trb_pool[1].length = 
cpu_to_le32(TRB_LEN(0)); + priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL)); } else { - priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC | - TRB_TYPE(TRB_NORMAL); + priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC | + TRB_TYPE(TRB_NORMAL)); priv_ep->trb_pool[1].control = 0; } @@ -264,11 +264,11 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, case USB_RECIP_INTERFACE: return cdns3_ep0_delegate_req(priv_dev, ctrl); case USB_RECIP_ENDPOINT: - index = cdns3_ep_addr_to_index(ctrl->wIndex); + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; /* check if endpoint is stalled or stall is pending */ - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) || (priv_ep->flags & EP_STALL_PENDING)) usb_status = BIT(USB_ENDPOINT_HALT); @@ -381,10 +381,10 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev, if (!(ctrl->wIndex & ~USB_DIR_IN)) return 0; - index = cdns3_ep_addr_to_index(ctrl->wIndex); + index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; - cdns3_select_ep(priv_dev, ctrl->wIndex); + cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); if (set) __cdns3_gadget_ep_set_halt(priv_ep); @@ -445,7 +445,7 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, if (priv_dev->gadget.state < USB_STATE_ADDRESS) return -EINVAL; - if (ctrl_req->wLength != 6) { + if (le16_to_cpu(ctrl_req->wLength) != 6) { dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", ctrl_req->wLength); return -EINVAL; @@ -469,7 +469,7 @@ static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev, if (ctrl_req->wIndex || ctrl_req->wLength) return -EINVAL; - priv_dev->isoch_delay = ctrl_req->wValue; + priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue); return 0; } diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h index 755c565822575589f28e63fff85aa9bb237fa77e..0a2a3269bfac61fcbbd5712b5824ddb0694dacd0 100644 --- a/drivers/usb/cdns3/trace.h +++ b/drivers/usb/cdns3/trace.h @@ -404,9 +404,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, TP_fast_assign( __assign_str(name, priv_ep->name); __entry->trb = trb; - __entry->buffer = trb->buffer; - __entry->length = trb->length; - __entry->control = trb->control; + __entry->buffer = le32_to_cpu(trb->buffer); + __entry->length = le32_to_cpu(trb->length); + __entry->control = le32_to_cpu(trb->control); __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); __entry->last_stream_id = priv_ep->last_stream_id; ), diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 9a7c53d09ab4f29f41e435257129e1642cf3fb5a..bb133245beed5c1d21f33f74d21191c9a7794427 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1243,6 +1243,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci) enable_irq(ci->irq); } +/* + * Handle the wakeup interrupt triggered by extcon connector + * We need to call ci_irq again for extcon since the first + * interrupt (wakeup int) only let the controller be out of + * low power mode, but not handle any interrupts. 
+ */ +static void ci_extcon_wakeup_int(struct ci_hdrc *ci) +{ + struct ci_hdrc_cable *cable_id, *cable_vbus; + u32 otgsc = hw_read_otgsc(ci, ~0); + + cable_id = &ci->platdata->id_extcon; + cable_vbus = &ci->platdata->vbus_extcon; + + if (!IS_ERR(cable_id->edev) && ci->is_otg && + (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) + ci_irq(ci->irq, ci); + + if (!IS_ERR(cable_vbus->edev) && ci->is_otg && + (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) + ci_irq(ci->irq, ci); +} + static int ci_controller_resume(struct device *dev) { struct ci_hdrc *ci = dev_get_drvdata(dev); @@ -1275,6 +1298,7 @@ static int ci_controller_resume(struct device *dev) enable_irq(ci->irq); if (ci_otg_is_fsm_mode(ci)) ci_otg_fsm_wakeup_by_srp(ci); + ci_extcon_wakeup_int(ci); } return 0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index c347d93eae64659a1f4c3752e9564444baebd16a..cb8ddbd537187de004e2acc1a0f2885debb667ef 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -342,7 +342,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev) { struct dwc2_hsotg *hsotg = platform_get_drvdata(dev); - disable_irq(hsotg->irq); + dwc2_disable_global_interrupts(hsotg); + synchronize_irq(hsotg->irq); } /** diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 96c05b121fac85137eedfc5b9e13cdc1dda12c39..139474c3e77b1c8693ac284114159af9a22ae702 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -38,6 +38,8 @@ #define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee #define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee +#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee +#define PCI_DEVICE_ID_INTEL_JSP 0x4dee #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -358,6 +360,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP), (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB), (kernel_ulong_t) &dwc3_pci_amd_properties, }, { } /* Terminating Entry */ diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c index 349deae7cabd89c9a1d6d8ea120c413038f89740..e2d7f69128a0e85b36088325dba484697280e9bf 100644 --- a/drivers/usb/gadget/function/f_uac1_legacy.c +++ b/drivers/usb/gadget/function/f_uac1_legacy.c @@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) /* Copy buffer is full, add it to the play_queue */ if (audio_buf_size - copy_buf->actual < req->actual) { + spin_lock_irq(&audio->lock); list_add_tail(&copy_buf->list, &audio->play_queue); + spin_unlock_irq(&audio->lock); schedule_work(&audio->playback_work); copy_buf = f_audio_buffer_alloc(audio_buf_size); if (IS_ERR(copy_buf)) diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index d69f61ff018193d689f64936140d4858b9d41211..9342a3d24963934326df7db62aee7135d4182d15 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -676,13 +676,7 @@ static int usba_ep_disable(struct usb_ep *_ep) if (!ep->ep.desc) { spin_unlock_irqrestore(&udc->lock, flags); - /* REVISIT because this driver disables endpoints in * reset_all_endpoints() before calling 
disconnect(), - * most gadget drivers would trigger this non-error ... - */ - if (udc->gadget.speed != USB_SPEED_UNKNOWN) - DBG(DBG_ERR, "ep_disable: %s not enabled\n", - ep->ep.name); + DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name); return -EINVAL; } ep->ep.desc = NULL; @@ -871,7 +865,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) u32 status; DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", - ep->ep.name, req); + ep->ep.name, _req); spin_lock_irqsave(&udc->lock, flags); diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 7164ad9800f11a8633d7f551b61011aba45e5d80..7419889ebe9ab029592610b20684e31951149edd 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit) if (num == 0) { _req = gr_alloc_request(&ep->ep, GFP_ATOMIC); + if (!_req) + return -ENOMEM; + buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC); - if (!_req || !buf) { - /* possible _req freed by gr_probe via gr_remove */ + if (!buf) { + gr_free_request(&ep->ep, _req); return -ENOMEM; } diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 58a4d332509026fc1fd332327a14bdab1aa2ad74..119505fac777681a6c9a9b06ff3fbdec02e68cb1 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c @@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_get_string); /** * usb_validate_langid - validate usb language identifiers - * @lang: usb language identifier + * @langid: usb language identifier * * Returns true for valid language identifier, otherwise false. */ diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index fea555570ad4f7df464ff4a57d34755817cda211..45c54d56ecbd55fd8241983c2c3ce42d76ffb809 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -557,6 +557,10 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, if (is_fs_or_ls(speed) && !has_tt) return false; + /* skip endpoint with zero maxpkt */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return false; + return true; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ef513c2fb843c04690ff2de3609e092a7a610c69..9234c82e70e413a7ad3ef4dcea0834f11da55ea7 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -265,6 +265,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1142) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x2142) + xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 2eaf5c0af80ce156b609a85c8239e199c1e478eb..ee6bf01775bba1a61d91c86b3eae27d85c7e16b2 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -856,7 +856,7 @@ static int tegra_xusb_init_context(struct tegra_xusb *tegra) if (!tegra->context.ipfs) return -ENOMEM; - tegra->context.fpci = devm_kcalloc(tegra->dev, soc->ipfs.num_offsets, + tegra->context.fpci = devm_kcalloc(tegra->dev, soc->fpci.num_offsets, sizeof(u32), GFP_KERNEL); if (!tegra->context.fpci) return -ENOMEM; diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 89675ee29645ae59bf1125921511950f42c2b2e8..8fbaef5c9d69353b4bdc8ebc17f4ab87b34689e7 
100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -77,6 +77,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, + { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, { USB_DEVICE(0x1a86, 0x5523) }, { }, diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 216edd5826cacdbfaf5bbe2903d372f235d229d7..ecda82198798e6df35698d181f89cef7d797f1b8 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -59,6 +59,7 @@ static const struct usb_device_id id_table_earthmate[] = { static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ @@ -73,6 +74,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, + { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index 35e223751c0e7988b254ab24a0c781a5bea8b4f3..16b7410ad0575a8d4948f39d1774f668c1c22a70 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -25,6 +25,9 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 +/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */ +#define VENDOR_ID_SAI 0x17dd + /* FRWD Dongle - a GPS sports watch */ #define VENDOR_ID_FRWD 0x6737 #define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index d5bff69b1769b28610ec0d874a97ce5e385d782a..b8dfeb4fb2ed6e1301df7eebc191969d4c478398 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -697,14 +697,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port, struct iuu_private *priv = usb_get_serial_port_data(port); unsigned long flags; - if (count > 256) - return -ENOMEM; - spin_lock_irqsave(&priv->lock, flags); + count = min(count, 256 - priv->writelen); + if (count == 0) + goto out; + /* fill the buffer */ memcpy(priv->writebuf + priv->writelen, buf, count); priv->writelen += count; +out: spin_unlock_irqrestore(&priv->lock, flags); return count; diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 254a8bbeea676c3e3395c2685c878a1b8366f650..9b7cee98ea6072f3006cf54f2700be3dfb363b02 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -245,6 +245,7 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 @@ -1097,6 +1098,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) }, { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), .driver_info = RSVD(4) }, + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), + .driver_info = RSVD(4) }, { 
USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), .driver_info = RSVD(4) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), @@ -2028,6 +2031,9 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(4) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */ .driver_info = RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ + { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index f634c81998bbf6ef5879567666b4890f96977a69..de881a6cff35778fbe1431cd62edcb3660aab103 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -521,14 +521,19 @@ static void vfio_pci_release(void *device_data) vfio_pci_vf_token_user_add(vdev, -1); vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); if (vdev->err_trigger) { eventfd_ctx_put(vdev->err_trigger); vdev->err_trigger = NULL; } + mutex_unlock(&vdev->igate); + + mutex_lock(&vdev->igate); if (vdev->req_trigger) { eventfd_ctx_put(vdev->req_trigger); vdev->req_trigger = NULL; } + mutex_unlock(&vdev->igate); } mutex_unlock(&vdev->reflck->lock); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 6fb4d7ecfa19fe3de5f44869bddc527ec792d4ee..b22adf03f58425bf3969fbb6df7496ea2c277963 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) continue; } - switch (v_req.type) { + switch (vhost32_to_cpu(vq, v_req.type)) { case VIRTIO_SCSI_T_TMF: vc.req = &v_req.tmf; vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req); diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index ca935c09a261c7b5f341b47fe344397b8a9ad3ac..35ebeeccde4dfda132c9e2c9dcd988f909d2d98a 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -216,7 +216,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = info->var.xoffset + rs; region.dy = 0; region.width = rw; @@ -224,7 +224,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset + bs; region.width = rs; diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index dfa9a8aa4509c1705ec2beeda76ff3cf03e60e65..78f3a5621478228f139602405030da219ad95d21 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -201,7 +201,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset; region.height = rw; @@ -209,7 +209,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset + bs; region.dy = 0; region.height = info->var.yres_virtual; diff --git 
a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index ce08251bfd38df4f1c79705a5f64593d1434d667..fd098ff17574b7c19042448d82f7a1e4238bc5a7 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -184,7 +184,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dx = 0; region.dy = info->var.yoffset + rs; region.height = rw; @@ -192,7 +192,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dx = info->var.xoffset; region.dy = info->var.yoffset; region.height = info->var.yres; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 1936afc78fec5ca0b4b8d1cf17c4cc5f92a1538e..e165a3fad29adfdee870b6b737561b6bea938aa8 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -231,7 +231,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, region.color = color; region.rop = ROP_COPY; - if (rw && !bottom_only) { + if ((int) rw > 0 && !bottom_only) { region.dy = 0; region.dx = info->var.xoffset; region.width = rw; @@ -239,7 +239,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, info->fbops->fb_fillrect(info, &region); } - if (bh) { + if ((int) bh > 0) { region.dy = info->var.yoffset; region.dx = info->var.xoffset; region.height = bh; diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index b690a8a4bf9e37a3dc0d962d09fa3fadaa37c093..18ebd7a6af98012b9b5d423581319b06260e86d0 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -1444,7 +1444,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, or_mask = caps->u.in.or_mask; not_mask = caps->u.in.not_mask; - if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) return -EINVAL; ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, @@ -1520,7 +1520,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */ if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG) + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT) return vbg_ioctl_vmmrequest(gdev, session, data); if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) @@ -1558,6 +1559,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) case VBG_IOCTL_HGCM_CALL(0): return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); case VBG_IOCTL_LOG(0): + case VBG_IOCTL_LOG_ALT(0): return vbg_ioctl_log(data); } diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h index 4188c12b839f7e74f845cc9524c1917b188dae95..77c3a9c8255dc92961677b347bd4b7008dd44ffb 100644 --- a/drivers/virt/vboxguest/vboxguest_core.h +++ b/drivers/virt/vboxguest/vboxguest_core.h @@ -15,6 +15,21 @@ #include #include "vmmdev.h" +/* + * The mainline kernel version (this version) of the vboxguest module + * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and + * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead + * of _IO(V, ...) as the out of tree VirtualBox upstream version does. 
+ * + * These _ALT definitions keep compatibility with the wrong defines the + * mainline kernel version used for a while. + * Note the VirtualBox userspace bits have always been built against + * VirtualBox upstream's headers, so this is likely not necessary. But + * we must never break our ABI so we keep these around to be 100% sure. + */ +#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) +#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) + struct vbg_session; /** VBox guest memory balloon. */ diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index 6e8c0f1c1056296e983fd70af5de7c405392c3ee..32c2c52f7e84ecb3dfb215f6171eaa242e289411 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, * the need for a bounce-buffer and another copy later on. */ is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || - req == VBG_IOCTL_VMMDEV_REQUEST_BIG; + req == VBG_IOCTL_VMMDEV_REQUEST_BIG || + req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT; if (is_vmmdev_req) buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT, diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h index 6337b8d75d960bdefc5c8119a187d185ff918dfb..21f408120e3f5acf9d605342d89d4a1e1b271f5f 100644 --- a/drivers/virt/vboxguest/vmmdev.h +++ b/drivers/virt/vboxguest/vmmdev.h @@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8); * not. */ #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) +/* The mask of valid capabilities, for sanity checking. */ +#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ struct vmmdev_hypervisorinfo { diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 5809e5f5b157ebc7a77829e4ff5aa96d0a9ea898..5c92e4a50882dffe145fb21c3a6372560aa266c5 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -85,7 +85,7 @@ config VIRTIO_MEM depends on VIRTIO depends on MEMORY_HOTPLUG_SPARSE depends on MEMORY_HOTREMOVE - select CONTIG_ALLOC + depends on CONTIG_ALLOC help This driver provides access to virtio-mem paravirtualized memory devices, allowing to hotplug and hotunplug memory. diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 1f157d2f495264f725023c41070bd228574f3c56..8be02f333b7a3b393ab367d8becce8cf5ca71b19 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb) static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) { if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, - &vb->config_read_bitmap)) + &vb->config_read_bitmap)) { virtio_cread(vb->vdev, struct virtio_balloon_config, free_page_hint_cmd_id, &vb->cmd_id_received_cache); + /* Legacy balloon config space is LE, unlike all other devices. */ + if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) + vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache); + } return vb->cmd_id_received_cache; } @@ -974,6 +978,11 @@ static int virtballoon_probe(struct virtio_device *vdev) /* * Let the hypervisor know that we are expecting a * specific value to be written back in balloon pages. + * + * If the PAGE_POISON value was larger than a byte we would + * need to byte swap poison_val here to guarantee it is + * little-endian. 
However for now it is a single byte so we + * can pass it as-is. */ if (!want_init_on_free()) memset(&poison_val, PAGE_POISON, sizeof(poison_val)); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 9d16aaffca9d7235378e7512829db5f30cc55c91..627ac04874940323f7260d4811c02f53677668c9 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -641,11 +641,11 @@ static int vm_cmdline_set(const char *device, &vm_cmdline_id, &consumed); /* - * sscanf() must processes at least 2 chunks; also there + * sscanf() must process at least 2 chunks; also there * must be no extra characters after the last chunk, so * str[consumed] must be '\0' */ - if (processed < 2 || str[consumed]) + if (processed < 2 || str[consumed] || irq == 0) return -EINVAL; resources[0].flags = IORESOURCE_MEM; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 4f168b46fbca58a4a6de302cf4b2a02aaa6971ed..786fbb7d8be063f82d65a1fcaef8a627ea10c0b6 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -693,10 +693,8 @@ static int xenbus_map_ring_pv(struct xenbus_device *dev, bool leaked; area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes); - if (!area) { - kfree(node); + if (!area) return -ENOMEM; - } for (i = 0; i < nr_grefs; i++) info->phys_addrs[i] = diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index c264839b2fd0b8e2bc73419e64acf03b0d620148..24fd163c6323e5b28616ecff2d255941c50aa40f 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -71,7 +71,7 @@ static bool afs_get_io_locks(struct afs_operation *op) swap(vnode, vnode2); if (mutex_lock_interruptible(&vnode->io_lock) < 0) { - op->error = -EINTR; + op->error = -ERESTARTSYS; op->flags |= AFS_OPERATION_STOP; _leave(" = f [I 0]"); return false; @@ -80,7 +80,7 @@ static bool afs_get_io_locks(struct afs_operation *op) if (vnode2) { if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) { - op->error = -EINTR; + op->error = -ERESTARTSYS; op->flags |= AFS_OPERATION_STOP; mutex_unlock(&vnode->io_lock); op->flags &= ~AFS_OPERATION_LOCK_0; diff --git a/fs/afs/write.c b/fs/afs/write.c index 7437806332d9b5f35a851f621f73cb8d79100e43..a121c247d95a3d63757cfd9e30557c41faa542e2 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -449,6 +449,7 @@ static int afs_store_data(struct address_space *mapping, op->store.first_offset = offset; op->store.last_to = to; op->mtime = vnode->vfs_inode.i_mtime; + op->flags |= AFS_OPERATION_UNINTR; op->ops = &afs_store_data_operation; try_next_key: diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c index b04c528b19d3425373286ec6e3c78e4eeee3c9cd..74c886f7c51cbe25334d38a9c57fcb2695f05bd4 100644 --- a/fs/autofs/waitq.c +++ b/fs/autofs/waitq.c @@ -53,7 +53,7 @@ static int autofs_write(struct autofs_sb_info *sbi, mutex_lock(&sbi->pipe_mutex); while (bytes) { - wr = __kernel_write(file, data, bytes, &file->f_pos); + wr = kernel_write(file, data, bytes, &file->f_pos); if (wr <= 0) break; data += wr; diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index d888e71e66b69306a34b4d087c13a7404bd72eb6..ea10f7bc99abf7590b89c1fa888cf8d135aac620 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1461,6 +1461,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, if (ret < 0 && ret != -ENOENT) { ulist_free(tmp); ulist_free(*roots); + *roots = NULL; return ret; } node = ulist_next(tmp, &uiter); diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index 
5615320fa659f31835f9415d0208a15b80404b0b..741c7e19c32f2cb3116f9507ed3cc307b1fb5af3 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -619,6 +619,7 @@ void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info) list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs, bg_list) { list_del_init(&block_group->bg_list); + btrfs_put_block_group(block_group); btrfs_discard_queue_work(&fs_info->discard_ctl, block_group); } spin_unlock(&fs_info->unused_bgs_lock); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 608f93438b294e465d71a2c4e919f446bce4c714..60278e52c37abb8c35a1ae42098f43e622f70930 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1999,7 +1999,8 @@ static int __process_pages_contig(struct address_space *mapping, if (!PageDirty(pages[i]) || pages[i]->mapping != mapping) { unlock_page(pages[i]); - put_page(pages[i]); + for (; i < ret; i++) + put_page(pages[i]); err = -EAGAIN; goto out; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 2520605afc256ebe2d0e415051370100928f7b34..b0d2c976587e523c070a6d41df05fefeeead226b 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3509,6 +3509,7 @@ const struct file_operations btrfs_file_operations = { .read_iter = generic_file_read_iter, .splice_read = generic_file_splice_read, .write_iter = btrfs_file_write_iter, + .splice_write = iter_file_splice_write, .mmap = btrfs_file_mmap, .open = btrfs_file_open, .release = btrfs_release_file, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 18d384f4af54e1d9c606bfeec8fff79688669a7d..6862cd7e21a996badf559082318a7163f6be912e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1690,12 +1690,8 @@ static noinline int run_delalloc_nocow(struct inode *inode, ret = fallback_to_cow(inode, locked_page, cow_start, found_key.offset - 1, page_started, nr_written); - if (ret) { - if (nocow) - btrfs_dec_nocow_writers(fs_info, - disk_bytenr); + if (ret) goto error; - } cow_start = (u64)-1; } @@ -1711,9 +1707,6 @@ static noinline int run_delalloc_nocow(struct inode *inode, ram_bytes, BTRFS_COMPRESS_NONE, BTRFS_ORDERED_PREALLOC); if (IS_ERR(em)) { - if (nocow) - btrfs_dec_nocow_writers(fs_info, - disk_bytenr); ret = PTR_ERR(em); goto error; } @@ -8130,20 +8123,17 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, /* * Qgroup reserved space handler * Page here will be either - * 1) Already written to disk - * In this case, its reserved space is released from data rsv map - * and will be freed by delayed_ref handler finally. - * So even we call qgroup_free_data(), it won't decrease reserved - * space. - * 2) Not written to disk - * This means the reserved space should be freed here. However, - * if a truncate invalidates the page (by clearing PageDirty) - * and the page is accounted for while allocating extent - * in btrfs_check_data_free_space() we let delayed_ref to - * free the entire extent. + * 1) Already written to disk or ordered extent already submitted + * Then its QGROUP_RESERVED bit in io_tree is already cleaned. + * Qgroup will be handled by its qgroup_record then. + * btrfs_qgroup_free_data() call will do nothing here. + * + * 2) Not written to disk yet + * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED + * bit of its io_tree, and free the qgroup reserved data space. + * Since the IO will never happen for this page. 
*/ - if (PageDirty(page)) - btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); + btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0d6e785bcb984c696ea8baa3ba7fe34b55cf5e72..f403fb1e6d379358e9e6d7173288ecb34bbde8a0 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7051,6 +7051,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info) mutex_lock(&uuid_mutex); mutex_lock(&fs_info->chunk_mutex); + /* + * It is possible for mount and umount to race in such a way that + * we execute this code path, but open_fs_devices failed to clear + * total_rw_bytes. We certainly want it cleared before reading the + * device items, so clear it here. + */ + fs_info->fs_devices->total_rw_bytes = 0; + /* * Read all device items, and then all the chunk items. All * device items are found before any chunk item (their object id diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index e7726f5f1241c23a92c1e486045478c274ba9c6c..3080cda9e82457c40d50c0fe3afc1c9aac8fa2e4 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) } data = kmap(page); - ret = __kernel_write(file, data, len, &pos); + ret = kernel_write(file, data, len, &pos); kunmap(page); fput(file); if (ret != len) diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index c7a311d28d3d7d9d1c3657bae29781748bbce38d..99b3180c613adf09d7beb09954894db7bdd01acd 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -156,5 +156,5 @@ extern int cifs_truncate_page(struct address_space *mapping, loff_t from); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.27" +#define CIFS_VERSION "2.28" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 9b0f8f33f832ceb941750fa0fb31f98aa884b937..be46fab4c96d8b794bd193b2dbb0a1c800bc88fc 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1149,20 +1149,20 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock) /* * Set the byte-range lock (posix style). Returns: - * 1) 0, if we set the lock and don't need to request to the server; - * 2) 1, if we need to request to the server; - * 3) <0, if the error occurs while setting the lock. + * 1) <0, if the error occurs while setting the lock; + * 2) 0, if we set the lock and don't need to request to the server; + * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock; + * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server. 
*/ static int cifs_posix_lock_set(struct file *file, struct file_lock *flock) { struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); - int rc = 1; + int rc = FILE_LOCK_DEFERRED + 1; if ((flock->fl_flags & FL_POSIX) == 0) return rc; -try_again: cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) { up_write(&cinode->lock_sem); @@ -1171,13 +1171,6 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock) rc = posix_lock_file(file, flock, NULL); up_write(&cinode->lock_sem); - if (rc == FILE_LOCK_DEFERRED) { - rc = wait_event_interruptible(flock->fl_wait, - list_empty(&flock->fl_blocked_member)); - if (!rc) - goto try_again; - locks_delete_block(flock); - } return rc; } @@ -1652,7 +1645,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, int posix_lock_type; rc = cifs_posix_lock_set(file, flock); - if (!rc || rc < 0) + if (rc <= FILE_LOCK_DEFERRED) return rc; if (type & server->vals->shared_lock_type) diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 49c3ea8aa84588d08c8e37671153fc0a40bc1519..ce95801e9b6644dcd8d6e8ae79bcc3308b5a4491 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2044,7 +2044,6 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, FILE_UNIX_BASIC_INFO *info_buf_target; unsigned int xid; int rc, tmprc; - bool new_target = d_really_is_negative(target_dentry); if (flags & ~RENAME_NOREPLACE) return -EINVAL; @@ -2121,13 +2120,8 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, */ unlink_target: - /* - * If the target dentry was created during the rename, try - * unlinking it if it's not negative - */ - if (new_target && - d_really_is_positive(target_dentry) && - (rc == -EACCES || rc == -EEXIST)) { + /* Try unlinking the target dentry if it's not negative */ + if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { if (d_is_dir(target_dentry)) tmprc = cifs_rmdir(target_dir, target_dentry); else diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 4a73e63c4d439cb67466a5391c4da7e8238e0f92..dcde44ff6cf9f3192e1fbb6fc63ecca8d142b5ef 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -169,6 +169,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) unsigned int xid; struct cifsFileInfo *pSMBFile = filep->private_data; struct cifs_tcon *tcon; + struct tcon_link *tlink; struct cifs_sb_info *cifs_sb; __u64 ExtAttrBits = 0; __u64 caps; @@ -307,13 +308,19 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) break; } cifs_sb = CIFS_SB(inode->i_sb); - tcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) { + rc = PTR_ERR(tlink); + break; + } + tcon = tlink_tcon(tlink); if (tcon && tcon->ses->server->ops->notify) { rc = tcon->ses->server->ops->notify(xid, filep, (void __user *)arg); cifs_dbg(FYI, "ioctl notify rc %d\n", rc); } else rc = -EOPNOTSUPP; + cifs_put_tlink(tlink); break; default: cifs_dbg(FYI, "unsupported ioctl\n"); diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 6a39451973f8b9c4b4c778f35819aba8dd7c8421..157992864ce7e55b50805242ab1240f7d2db5171 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -354,9 +354,13 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_sync_hdr *shdr) ((struct smb2_ioctl_rsp *)shdr)->OutputCount); break; case SMB2_CHANGE_NOTIFY: + *off = le16_to_cpu( + ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset); + *len = le32_to_cpu( + ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength); + break; default: - 
/* BB FIXME for unimplemented cases above */ - cifs_dbg(VFS, "no length check for command\n"); + cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command)); break; } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index d9fdafa5eb601123a00a6b5c7c3eb6c42a84fbff..32f90dc82c840fad48bf5a37b0d6c6a780bc7db7 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -2148,7 +2148,7 @@ smb3_notify(const unsigned int xid, struct file *pfile, tcon = cifs_sb_master_tcon(cifs_sb); oparms.tcon = tcon; - oparms.desired_access = FILE_READ_ATTRIBUTES; + oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; oparms.disposition = FILE_OPEN; oparms.create_options = cifs_create_options(cifs_sb, 0); oparms.fid = &fid; diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 12c66f5d92dd2e5e1de7fc4d733bed11871adfa0..28bb5689333a59a69fa46bc6beba8e8d1f81acd2 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -201,6 +201,9 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_d_op = &efivarfs_d_ops; sb->s_time_gran = 1; + if (!efivar_supports_writes()) + sb->s_flags |= SB_RDONLY; + inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true); if (!inode) return -ENOMEM; @@ -252,9 +255,6 @@ static struct file_system_type efivarfs_type = { static __init int efivarfs_init(void) { - if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) - return -ENODEV; - if (!efivars_kobject()) return -ENODEV; diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 91ece649285d2824ff99ee5b996adeb8ff973a93..119abf0d8dd6fe5b23e1537341068f3f3060f035 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -1112,7 +1112,7 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, ret = exfat_get_next_cluster(sb, &clu.dir); } - if (ret || clu.dir != EXFAT_EOF_CLUSTER) { + if (ret || clu.dir == EXFAT_EOF_CLUSTER) { /* just initialized hint_stat */ hint_stat->clu = p_dir->dir; hint_stat->eidx = 0; diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 7579cd3bbadba87a8bfd56c0699c8186902a6763..75c7bdbeba6d3e9316c7b795cd525d1323e47b83 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -371,7 +371,7 @@ static inline bool exfat_is_last_sector_in_cluster(struct exfat_sb_info *sbi, static inline sector_t exfat_cluster_to_sector(struct exfat_sb_info *sbi, unsigned int clus) { - return ((clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) + + return ((sector_t)(clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) + sbi->data_start_sector; } diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 3b7fea465fd41e2859a94afac61dd9768a9f3119..a6a063830edcb68944e55fe7308dee1312463b15 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -176,7 +176,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) ep2->dentry.stream.size = 0; } else { ep2->dentry.stream.valid_size = cpu_to_le64(new_size); - ep2->dentry.stream.size = ep->dentry.stream.valid_size; + ep2->dentry.stream.size = ep2->dentry.stream.valid_size; } if (new_size == 0) { diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c index 57b5a7a4d1f7a18b1854348bd6841ea7bc9c466a..a3c927501e676f3b240eca17106b07a296d99e6f 100644 --- a/fs/exfat/nls.c +++ b/fs/exfat/nls.c @@ -495,7 +495,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb, struct exfat_uni_name *p_uniname, int *p_lossy) { int i, unilen, lossy = NLS_NAME_NO_LOSSY; - unsigned short upname[MAX_NAME_LENGTH + 1]; + __le16 upname[MAX_NAME_LENGTH + 1]; unsigned short *uniname = p_uniname->name; WARN_ON(!len); @@ 
-519,7 +519,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb, exfat_wstrchr(bad_uni_chars, *uniname)) lossy |= NLS_NAME_LOSSY; - upname[i] = exfat_toupper(sb, *uniname); + upname[i] = cpu_to_le16(exfat_toupper(sb, *uniname)); uniname++; } @@ -597,7 +597,7 @@ static int exfat_nls_to_ucs2(struct super_block *sb, struct exfat_uni_name *p_uniname, int *p_lossy) { int i = 0, unilen = 0, lossy = NLS_NAME_NO_LOSSY; - unsigned short upname[MAX_NAME_LENGTH + 1]; + __le16 upname[MAX_NAME_LENGTH + 1]; unsigned short *uniname = p_uniname->name; struct nls_table *nls = EXFAT_SB(sb)->nls_io; @@ -611,7 +611,7 @@ static int exfat_nls_to_ucs2(struct super_block *sb, exfat_wstrchr(bad_uni_chars, *uniname)) lossy |= NLS_NAME_LOSSY; - upname[unilen] = exfat_toupper(sb, *uniname); + upname[unilen] = cpu_to_le16(exfat_toupper(sb, *uniname)); uniname++; unilen++; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e573b0cd2737dc1f8f83489584c4e05af45050d3..83d917f7e5425752084e0a21fc43671bdd11339e 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -18,6 +18,7 @@ #include #include #include +#include static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, struct fuse_page_desc **desc) @@ -1586,7 +1587,6 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct backing_dev_info *bdi = inode_to_bdi(inode); int i; - rb_erase(&wpa->writepages_entry, &fi->writepages); for (i = 0; i < ap->num_pages; i++) { dec_wb_stat(&bdi->wb, WB_WRITEBACK); dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); @@ -1637,6 +1637,7 @@ __acquires(fi->lock) out_free: fi->writectr--; + rb_erase(&wpa->writepages_entry, &fi->writepages); fuse_writepage_finish(fc, wpa); spin_unlock(&fi->lock); @@ -1674,7 +1675,8 @@ __acquires(fi->lock) } } -static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) +static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, + struct fuse_writepage_args *wpa) { pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1; @@ -1697,11 +1699,17 @@ static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) else if (idx_to < curr_index) p = &(*p)->rb_left; else - return (void) WARN_ON(true); + return curr; } rb_link_node(&wpa->writepages_entry, parent, p); rb_insert_color(&wpa->writepages_entry, root); + return NULL; +} + +static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) +{ + WARN_ON(fuse_insert_writeback(root, wpa)); } static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args, @@ -1714,6 +1722,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args, mapping_set_error(inode->i_mapping, error); spin_lock(&fi->lock); + rb_erase(&wpa->writepages_entry, &fi->writepages); while (wpa->next) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_write_in *inarg = &wpa->ia.write.in; @@ -1952,14 +1961,14 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) } /* - * First recheck under fi->lock if the offending offset is still under - * writeback. If yes, then iterate auxiliary write requests, to see if there's + * Check under fi->lock if the page is under writeback, and insert it onto the + * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's * one already added for a page at this offset. If there's none, then insert * this new request onto the auxiliary list, otherwise reuse the existing one by - * copying the new page contents over to the old temporary page. 
+ * swapping the new temp page with the old one. */ -static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa, - struct page *page) +static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, + struct page *page) { struct fuse_inode *fi = get_fuse_inode(new_wpa->inode); struct fuse_writepage_args *tmp; @@ -1967,17 +1976,15 @@ static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa, struct fuse_args_pages *new_ap = &new_wpa->ia.ap; WARN_ON(new_ap->num_pages != 0); + new_ap->num_pages = 1; spin_lock(&fi->lock); - rb_erase(&new_wpa->writepages_entry, &fi->writepages); - old_wpa = fuse_find_writeback(fi, page->index, page->index); + old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa); if (!old_wpa) { - tree_insert(&fi->writepages, new_wpa); spin_unlock(&fi->lock); - return false; + return true; } - new_ap->num_pages = 1; for (tmp = old_wpa->next; tmp; tmp = tmp->next) { pgoff_t curr_index; @@ -2006,7 +2013,41 @@ static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa, fuse_writepage_free(new_wpa); } - return true; + return false; +} + +static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, + struct fuse_args_pages *ap, + struct fuse_fill_wb_data *data) +{ + WARN_ON(!ap->num_pages); + + /* + * Being under writeback is unlikely but possible. For example direct + * read to an mmaped fuse file will set the page dirty twice; once when + * the pages are faulted with get_user_pages(), and then after the read + * completed. + */ + if (fuse_page_is_writeback(data->inode, page->index)) + return true; + + /* Reached max pages */ + if (ap->num_pages == fc->max_pages) + return true; + + /* Reached max write bytes */ + if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write) + return true; + + /* Discontinuity */ + if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index) + return true; + + /* Need to grow the pages array? If so, did the expansion fail? */ + if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data)) + return true; + + return false; } static int fuse_writepages_fill(struct page *page, @@ -2019,7 +2060,6 @@ static int fuse_writepages_fill(struct page *page, struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); struct page *tmp_page; - bool is_writeback; int err; if (!data->ff) { @@ -2029,25 +2069,9 @@ static int fuse_writepages_fill(struct page *page, goto out_unlock; } - /* - * Being under writeback is unlikely but possible. For example direct - * read to an mmaped fuse file will set the page dirty twice; once when - * the pages are faulted with get_user_pages(), and then after the read - * completed. 
- */ - is_writeback = fuse_page_is_writeback(inode, page->index); - - if (wpa && ap->num_pages && - (is_writeback || ap->num_pages == fc->max_pages || - (ap->num_pages + 1) * PAGE_SIZE > fc->max_write || - data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) { + if (wpa && fuse_writepage_need_send(fc, page, ap, data)) { fuse_writepages_send(data); data->wpa = NULL; - } else if (wpa && ap->num_pages == data->max_pages) { - if (!fuse_pages_realloc(data)) { - fuse_writepages_send(data); - data->wpa = NULL; - } } err = -ENOMEM; @@ -2085,12 +2109,6 @@ static int fuse_writepages_fill(struct page *page, ap->args.end = fuse_writepage_end; ap->num_pages = 0; wpa->inode = inode; - - spin_lock(&fi->lock); - tree_insert(&fi->writepages, wpa); - spin_unlock(&fi->lock); - - data->wpa = wpa; } set_page_writeback(page); @@ -2098,26 +2116,25 @@ static int fuse_writepages_fill(struct page *page, ap->pages[ap->num_pages] = tmp_page; ap->descs[ap->num_pages].offset = 0; ap->descs[ap->num_pages].length = PAGE_SIZE; + data->orig_pages[ap->num_pages] = page; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); err = 0; - if (is_writeback && fuse_writepage_in_flight(wpa, page)) { + if (data->wpa) { + /* + * Protected by fi->lock against concurrent access by + * fuse_page_is_writeback(). + */ + spin_lock(&fi->lock); + ap->num_pages++; + spin_unlock(&fi->lock); + } else if (fuse_writepage_add(wpa, page)) { + data->wpa = wpa; + } else { end_page_writeback(page); - data->wpa = NULL; - goto out_unlock; } - data->orig_pages[ap->num_pages] = page; - - /* - * Protected by fi->lock against concurrent access by - * fuse_page_is_writeback(). - */ - spin_lock(&fi->lock); - ap->num_pages++; - spin_unlock(&fi->lock); - out_unlock: unlock_page(page); @@ -2149,10 +2166,8 @@ static int fuse_writepages(struct address_space *mapping, err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); if (data.wpa) { - /* Ignore errors if we can write at least one page */ WARN_ON(!data.wpa->ia.ap.num_pages); fuse_writepages_send(&data); - err = 0; } if (data.ff) fuse_file_put(data.ff, false, false); @@ -2761,7 +2776,16 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, struct iovec *iov = iov_page; iov->iov_base = (void __user *)arg; - iov->iov_len = _IOC_SIZE(cmd); + + switch (cmd) { + case FS_IOC_GETFLAGS: + case FS_IOC_SETFLAGS: + iov->iov_len = sizeof(int); + break; + default: + iov->iov_len = _IOC_SIZE(cmd); + break; + } if (_IOC_DIR(cmd) & _IOC_WRITE) { in_iov = iov; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 5b4aebf5821fea97f032c3afd4c29c147db78616..bba747520e9b08b752128a3db2bf62da36b473a3 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -121,10 +121,12 @@ static void fuse_evict_inode(struct inode *inode) } } -static int fuse_remount_fs(struct super_block *sb, int *flags, char *data) +static int fuse_reconfigure(struct fs_context *fc) { + struct super_block *sb = fc->root->d_sb; + sync_filesystem(sb); - if (*flags & SB_MANDLOCK) + if (fc->sb_flags & SB_MANDLOCK) return -EINVAL; return 0; @@ -475,6 +477,17 @@ static int fuse_parse_param(struct fs_context *fc, struct fs_parameter *param) struct fuse_fs_context *ctx = fc->fs_private; int opt; + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { + /* + * Ignore options coming from mount(MS_REMOUNT) for backward + * compatibility. 
+ */ + if (fc->oldapi) + return 0; + + return invalfc(fc, "No changes allowed in reconfigure"); + } + opt = fs_parse(fc, fuse_fs_parameters, param, &result); if (opt < 0) return opt; @@ -817,7 +830,6 @@ static const struct super_operations fuse_super_operations = { .evict_inode = fuse_evict_inode, .write_inode = fuse_write_inode, .drop_inode = generic_delete_inode, - .remount_fs = fuse_remount_fs, .put_super = fuse_put_super, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, @@ -1296,6 +1308,7 @@ static int fuse_get_tree(struct fs_context *fc) static const struct fs_context_operations fuse_context_ops = { .free = fuse_free_fc, .parse_param = fuse_parse_param, + .reconfigure = fuse_reconfigure, .get_tree = fuse_get_tree, }; diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 72c9560f4467e0ae96da63545876f46971d288d1..68cd700a2719eb7617dc3f2a0dad1d7605eb2438 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -468,21 +468,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) } -/** - * __gfs2_readpage - readpage - * @file: The file to read a page for - * @page: The page to read - * - * This is the core of gfs2's readpage. It's used by the internal file - * reading code as in that case we already hold the glock. Also it's - * called by gfs2_readpage() once the required lock has been granted. - */ - static int __gfs2_readpage(void *file, struct page *page) { struct gfs2_inode *ip = GFS2_I(page->mapping->host); struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); - int error; if (i_blocksize(page->mapping->host) == PAGE_SIZE && @@ -505,36 +494,11 @@ static int __gfs2_readpage(void *file, struct page *page) * gfs2_readpage - read a page of a file * @file: The file to read * @page: The page of the file - * - * This deals with the locking required. We have to unlock and - * relock the page in order to get the locking in the right - * order. 
*/ static int gfs2_readpage(struct file *file, struct page *page) { - struct address_space *mapping = page->mapping; - struct gfs2_inode *ip = GFS2_I(mapping->host); - struct gfs2_holder gh; - int error; - - unlock_page(page); - gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); - error = gfs2_glock_nq(&gh); - if (unlikely(error)) - goto out; - error = AOP_TRUNCATED_PAGE; - lock_page(page); - if (page->mapping == mapping && !PageUptodate(page)) - error = __gfs2_readpage(file, page); - else - unlock_page(page); - gfs2_glock_dq(&gh); -out: - gfs2_holder_uninit(&gh); - if (error && error != AOP_TRUNCATED_PAGE) - lock_page(page); - return error; + return __gfs2_readpage(file, page); } /** @@ -598,16 +562,9 @@ static void gfs2_readahead(struct readahead_control *rac) { struct inode *inode = rac->mapping->host; struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_holder gh; - gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); - if (gfs2_glock_nq(&gh)) - goto out_uninit; if (!gfs2_is_stuffed(ip)) mpage_readahead(rac, gfs2_block_map); - gfs2_glock_dq(&gh); -out_uninit: - gfs2_holder_uninit(&gh); } /** diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index fe305e4bfd37345048aa6859ed75b74c1699987a..bebde537ac8cf26d3329b817dccfb356f6e4e2c3 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -558,8 +558,29 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) return block_page_mkwrite_return(ret); } +static vm_fault_t gfs2_fault(struct vm_fault *vmf) +{ + struct inode *inode = file_inode(vmf->vma->vm_file); + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; + vm_fault_t ret; + int err; + + gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); + err = gfs2_glock_nq(&gh); + if (err) { + ret = block_page_mkwrite_return(err); + goto out_uninit; + } + ret = filemap_fault(vmf); + gfs2_glock_dq(&gh); +out_uninit: + gfs2_holder_uninit(&gh); + return ret; +} + static const struct vm_operations_struct gfs2_vm_ops = { - .fault = filemap_fault, + .fault = gfs2_fault, .map_pages = filemap_map_pages, .page_mkwrite = gfs2_page_mkwrite, }; @@ -824,6 +845,9 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from) static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { + struct gfs2_inode *ip; + struct gfs2_holder gh; + size_t written = 0; ssize_t ret; if (iocb->ki_flags & IOCB_DIRECT) { @@ -832,7 +856,31 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) return ret; iocb->ki_flags &= ~IOCB_DIRECT; } - return generic_file_read_iter(iocb, to); + iocb->ki_flags |= IOCB_NOIO; + ret = generic_file_read_iter(iocb, to); + iocb->ki_flags &= ~IOCB_NOIO; + if (ret >= 0) { + if (!iov_iter_count(to)) + return ret; + written = ret; + } else { + if (ret != -EAGAIN) + return ret; + if (iocb->ki_flags & IOCB_NOWAIT) + return ret; + } + ip = GFS2_I(iocb->ki_filp->f_mapping->host); + gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); + ret = gfs2_glock_nq(&gh); + if (ret) + goto out_uninit; + ret = generic_file_read_iter(iocb, to); + if (ret > 0) + written += ret; + gfs2_glock_dq(&gh); +out_uninit: + gfs2_holder_uninit(&gh); + return written ? 
written : ret; } /** diff --git a/fs/io_uring.c b/fs/io_uring.c index d37d7ea5ebe58c079975788c4ab9110fbdcba57f..493e5047e67c9613592d6ac6c1b0268292cca3b0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -605,6 +605,7 @@ enum { struct async_poll { struct io_poll_iocb poll; + struct io_poll_iocb *double_poll; struct io_wq_work work; }; @@ -1096,6 +1097,8 @@ static inline void io_prep_async_work(struct io_kiocb *req, { const struct io_op_def *def = &io_op_defs[req->opcode]; + io_req_init_async(req); + if (req->flags & REQ_F_ISREG) { if (def->hash_reg_file) io_wq_hash_work(&req->work, file_inode(req->file)); @@ -1104,7 +1107,6 @@ static inline void io_prep_async_work(struct io_kiocb *req, req->work.flags |= IO_WQ_WORK_UNBOUND; } - io_req_init_async(req); io_req_work_grab_env(req, def); *link = io_prep_linked_timeout(req); @@ -1274,6 +1276,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) if (cqe) { clear_bit(0, &ctx->sq_check_overflow); clear_bit(0, &ctx->cq_check_overflow); + ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; } spin_unlock_irqrestore(&ctx->completion_lock, flags); io_cqring_ev_posted(ctx); @@ -1311,6 +1314,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) if (list_empty(&ctx->cq_overflow_list)) { set_bit(0, &ctx->sq_check_overflow); set_bit(0, &ctx->cq_check_overflow); + ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; } req->flags |= REQ_F_OVERFLOW; refcount_inc(&req->refs); @@ -3551,6 +3555,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (req->flags & REQ_F_NEED_CLEANUP) return 0; + io->msg.msg.msg_name = &io->msg.addr; io->msg.iov = io->msg.fast_iov; ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, &io->msg.iov); @@ -3732,6 +3737,7 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io) { + io->msg.msg.msg_name = &io->msg.addr; io->msg.iov = io->msg.fast_iov; #ifdef CONFIG_COMPAT @@ -3840,10 +3846,16 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock) ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg, kmsg->uaddr, flags); - if (force_nonblock && ret == -EAGAIN) - return io_setup_async_msg(req, kmsg); + if (force_nonblock && ret == -EAGAIN) { + ret = io_setup_async_msg(req, kmsg); + if (ret != -EAGAIN) + kfree(kbuf); + return ret; + } if (ret == -ERESTARTSYS) ret = -EINTR; + if (kbuf) + kfree(kbuf); } if (kmsg && kmsg->iov != kmsg->fast_iov) @@ -4148,9 +4160,9 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) return false; } -static void io_poll_remove_double(struct io_kiocb *req) +static void io_poll_remove_double(struct io_kiocb *req, void *data) { - struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + struct io_poll_iocb *poll = data; lockdep_assert_held(&req->ctx->completion_lock); @@ -4170,7 +4182,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) { struct io_ring_ctx *ctx = req->ctx; - io_poll_remove_double(req); + io_poll_remove_double(req, req->io); req->poll.done = true; io_cqring_fill_event(req, error ? 
error : mangle_poll(mask)); io_commit_cqring(ctx); @@ -4187,10 +4199,9 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) hash_del(&req->hash_node); io_poll_complete(req, req->result, 0); - req->flags |= REQ_F_COMP_LOCKED; - io_put_req_find_next(req, nxt); spin_unlock_irq(&ctx->completion_lock); + io_put_req_find_next(req, nxt); io_cqring_ev_posted(ctx); } @@ -4213,21 +4224,21 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, int sync, void *key) { struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = (struct io_poll_iocb *) req->io; + struct io_poll_iocb *poll = req->apoll->double_poll; __poll_t mask = key_to_poll(key); /* for instances that support it check for an event match first: */ if (mask && !(mask & poll->events)) return 0; - if (req->poll.head) { + if (poll && poll->head) { bool done; - spin_lock(&req->poll.head->lock); - done = list_empty(&req->poll.wait.entry); + spin_lock(&poll->head->lock); + done = list_empty(&poll->wait.entry); if (!done) - list_del_init(&req->poll.wait.entry); - spin_unlock(&req->poll.head->lock); + list_del_init(&poll->wait.entry); + spin_unlock(&poll->head->lock); if (!done) __io_async_wake(req, poll, mask, io_poll_task_func); } @@ -4247,7 +4258,8 @@ static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, } static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, - struct wait_queue_head *head) + struct wait_queue_head *head, + struct io_poll_iocb **poll_ptr) { struct io_kiocb *req = pt->req; @@ -4258,7 +4270,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, */ if (unlikely(poll->head)) { /* already have a 2nd entry, fail a third attempt */ - if (req->io) { + if (*poll_ptr) { pt->error = -EINVAL; return; } @@ -4270,7 +4282,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake); refcount_inc(&req->refs); poll->wait.private = req; - req->io = (void *) poll; + *poll_ptr = poll; } pt->error = 0; @@ -4282,8 +4294,9 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, struct poll_table_struct *p) { struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); + struct async_poll *apoll = pt->req->apoll; - __io_queue_proc(&pt->req->apoll->poll, pt, head); + __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); } static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) @@ -4333,11 +4346,13 @@ static void io_async_task_func(struct callback_head *cb) } } + io_poll_remove_double(req, apoll->double_poll); spin_unlock_irq(&ctx->completion_lock); /* restore ->work in case we need to retry again */ if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll->double_poll); kfree(apoll); if (!canceled) { @@ -4425,7 +4440,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; - bool had_io; if (!req->file || !file_can_poll(req->file)) return false; @@ -4437,11 +4451,11 @@ static bool io_arm_poll_handler(struct io_kiocb *req) apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); if (unlikely(!apoll)) return false; + apoll->double_poll = NULL; req->flags |= REQ_F_POLLED; if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&apoll->work, &req->work, sizeof(req->work)); - had_io = req->io != NULL; io_get_req_task(req); req->apoll = apoll; @@ -4459,13 +4473,11 @@ static bool 
io_arm_poll_handler(struct io_kiocb *req) ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, io_async_wake); if (ret) { - ipt.error = 0; - /* only remove double add if we did it here */ - if (!had_io) - io_poll_remove_double(req); + io_poll_remove_double(req, apoll->double_poll); spin_unlock_irq(&ctx->completion_lock); if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll->double_poll); kfree(apoll); return false; } @@ -4496,11 +4508,13 @@ static bool io_poll_remove_one(struct io_kiocb *req) bool do_complete; if (req->opcode == IORING_OP_POLL_ADD) { - io_poll_remove_double(req); + io_poll_remove_double(req, req->io); do_complete = __io_poll_remove_one(req, &req->poll); } else { struct async_poll *apoll = req->apoll; + io_poll_remove_double(req, apoll->double_poll); + /* non-poll requests have submit ref still */ do_complete = __io_poll_remove_one(req, &apoll->poll); if (do_complete) { @@ -4513,6 +4527,7 @@ static bool io_poll_remove_one(struct io_kiocb *req) if (req->flags & REQ_F_WORK_INITIALIZED) memcpy(&req->work, &apoll->work, sizeof(req->work)); + kfree(apoll->double_poll); kfree(apoll); } } @@ -4613,7 +4628,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, { struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); - __io_queue_proc(&pt->req->poll, pt, head); + __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io); } static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -4642,6 +4657,10 @@ static int io_poll_add(struct io_kiocb *req) struct io_poll_table ipt; __poll_t mask; + /* ->work is in union with hash_node and others */ + io_req_work_drop_env(req); + req->flags &= ~REQ_F_WORK_INITIALIZED; + INIT_HLIST_NODE(&req->hash_node); INIT_LIST_HEAD(&req->list); ipt.pt._qproc = io_poll_queue_proc; @@ -4721,7 +4740,9 @@ static int io_timeout_remove_prep(struct io_kiocb *req, { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len) + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + return -EINVAL; + if (sqe->ioprio || sqe->buf_index || sqe->len) return -EINVAL; req->timeout.addr = READ_ONCE(sqe->addr); @@ -4899,8 +4920,9 @@ static int io_async_cancel_prep(struct io_kiocb *req, { if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - if (sqe->flags || sqe->ioprio || sqe->off || sqe->len || - sqe->cancel_flags) + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + return -EINVAL; + if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags) return -EINVAL; req->cancel.addr = READ_ONCE(sqe->addr); @@ -4918,7 +4940,9 @@ static int io_async_cancel(struct io_kiocb *req) static int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (sqe->flags || sqe->ioprio || sqe->rw_flags) + if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) + return -EINVAL; + if (sqe->ioprio || sqe->rw_flags) return -EINVAL; req->files_update.offset = READ_ONCE(sqe->off); @@ -5709,6 +5733,7 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) * Never try inline submit of IOSQE_ASYNC is set, go straight * to async execution. 
*/ + io_req_init_async(req); req->work.flags |= IO_WQ_WORK_CONCURRENT; io_queue_async_work(req); } else { @@ -6080,9 +6105,9 @@ static int io_sq_thread(void *data) } /* Tell userspace we may need a wakeup call */ + spin_lock_irq(&ctx->completion_lock); ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; - /* make sure to read SQ tail after writing flags */ - smp_mb(); + spin_unlock_irq(&ctx->completion_lock); to_submit = io_sqring_entries(ctx); if (!to_submit || ret == -EBUSY) { @@ -6100,13 +6125,17 @@ static int io_sq_thread(void *data) schedule(); finish_wait(&ctx->sqo_wait, &wait); + spin_lock_irq(&ctx->completion_lock); ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; + spin_unlock_irq(&ctx->completion_lock); ret = 0; continue; } finish_wait(&ctx->sqo_wait, &wait); + spin_lock_irq(&ctx->completion_lock); ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; + spin_unlock_irq(&ctx->completion_lock); } mutex_lock(&ctx->uring_lock); @@ -6693,6 +6722,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, for (i = 0; i < nr_tables; i++) kfree(ctx->file_data->table[i].files); + percpu_ref_exit(&ctx->file_data->refs); kfree(ctx->file_data->table); kfree(ctx->file_data); ctx->file_data = NULL; @@ -6845,8 +6875,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, } table->files[index] = file; err = io_sqe_file_register(ctx, file, i); - if (err) + if (err) { + fput(file); break; + } } nr_args--; done++; @@ -7342,9 +7374,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) io_mem_free(ctx->sq_sqes); percpu_ref_exit(&ctx->refs); - if (ctx->account_mem) - io_unaccount_mem(ctx->user, - ring_pages(ctx->sq_entries, ctx->cq_entries)); free_uid(ctx->user); put_cred(ctx->creds); kfree(ctx->cancel_hash); @@ -7429,6 +7458,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) if (ctx->rings) io_cqring_overflow_flush(ctx, true); idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx); + + /* + * Do this upfront, so we won't have a grace period where the ring + * is closed but resources aren't reaped yet. This can cause + * spurious failure in setting up a new ring. 
+ */ + if (ctx->account_mem) + io_unaccount_mem(ctx->user, + ring_pages(ctx->sq_entries, ctx->cq_entries)); + INIT_WORK(&ctx->exit_work, io_ring_exit_work); queue_work(system_wq, &ctx->exit_work); } @@ -7488,6 +7527,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, if (list_empty(&ctx->cq_overflow_list)) { clear_bit(0, &ctx->sq_check_overflow); clear_bit(0, &ctx->cq_check_overflow); + ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; } spin_unlock_irq(&ctx->completion_lock); diff --git a/fs/namespace.c b/fs/namespace.c index f30ed401cc6d7a824ad5b19b21459cdfd3f0435f..4a0f600a3328595d6630491e3077ee4209521adc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2603,6 +2603,7 @@ static int do_remount(struct path *path, int ms_flags, int sb_flags, if (IS_ERR(fc)) return PTR_ERR(fc); + fc->oldapi = true; err = parse_monolithic_mount_data(fc, data); if (!err) { down_write(&sb->s_umount); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 3d113cf8908ac93b63276197d09f4330132aca6d..1b79dd5cf66160c4de72c5dcc50c456950a6a6fd 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -267,6 +267,8 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq) { struct inode *inode = dreq->inode; + inode_dio_end(inode); + if (dreq->iocb) { long res = (long) dreq->error; if (dreq->count != 0) { @@ -278,10 +280,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq) complete(&dreq->completion); - igrab(inode); nfs_direct_req_release(dreq); - inode_dio_end(inode); - iput(inode); } static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) @@ -411,10 +410,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. */ if (requested_bytes == 0) { - igrab(inode); - nfs_direct_req_release(dreq); inode_dio_end(inode); - iput(inode); + nfs_direct_req_release(dreq); return result < 0 ? result : -EIO; } @@ -867,10 +864,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * generic layer handle the completion. */ if (requested_bytes == 0) { - igrab(inode); - nfs_direct_req_release(dreq); inode_dio_end(inode); - iput(inode); + nfs_direct_req_release(dreq); return result < 0 ? 
result : -EIO; } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index ccd6c1637b270b213c44e1d73775c37ae3ef05ae..f96367a2463e37a355d777fdfe4314eeb561191e 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -83,7 +83,6 @@ nfs_file_release(struct inode *inode, struct file *filp) dprintk("NFS: release(%pD2)\n", filp); nfs_inc_stats(inode, NFSIOS_VFSRELEASE); - inode_dio_wait(inode); nfs_file_clear_open_context(filp); return 0; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e32717fd1169a90f47ddf16cc88116ac113672b8..2e2dac29a9e91507c9f122134a3f8808341f5248 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -774,6 +774,14 @@ static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, slot->seq_nr_last_acked = seqnr; } +static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred, + struct nfs4_slot *slot) +{ + struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true); + if (!IS_ERR(task)) + rpc_put_task_async(task); +} + static int nfs41_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) { @@ -790,6 +798,7 @@ static int nfs41_sequence_process(struct rpc_task *task, goto out; session = slot->table->session; + clp = session->clp; trace_nfs4_sequence_done(session, res); @@ -804,7 +813,6 @@ static int nfs41_sequence_process(struct rpc_task *task, nfs4_slot_sequence_acked(slot, slot->seq_nr); /* Update the slot's sequence and clientid lease timer */ slot->seq_done = 1; - clp = session->clp; do_renew_lease(clp, res->sr_timestamp); /* Check sequence flags */ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, @@ -852,10 +860,18 @@ static int nfs41_sequence_process(struct rpc_task *task, /* * Were one or more calls using this slot interrupted? * If the server never received the request, then our - * transmitted slot sequence number may be too high. + * transmitted slot sequence number may be too high. However, + * if the server did receive the request then it might + * accidentally give us a reply with a mismatched operation. + * We can sort this out by sending a lone sequence operation + * to the server on the same slot. 
*/ if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { slot->seq_nr--; + if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) { + nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot); + res->sr_slot = NULL; + } goto retry_nowait; } /* diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index cce2510b2ccaaf10793d382094e04f845b98a91d..c9056316a0b353f5d8f87f1cdbc8aee690b2be82 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -507,6 +507,17 @@ find_any_file(struct nfs4_file *f) return ret; } +static struct nfsd_file *find_deleg_file(struct nfs4_file *f) +{ + struct nfsd_file *ret = NULL; + + spin_lock(&f->fi_lock); + if (f->fi_deleg_file) + ret = nfsd_file_get(f->fi_deleg_file); + spin_unlock(&f->fi_lock); + return ret; +} + static atomic_long_t num_delegations; unsigned long max_delegations; @@ -2444,6 +2455,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2481,6 +2494,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) oo = ols->st_stateowner; nf = st->sc_file; file = find_any_file(nf); + if (!file) + return 0; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2513,7 +2528,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) ds = delegstateid(st); nf = st->sc_file; - file = nf->fi_deleg_file; + file = find_deleg_file(nf); + if (!file) + return 0; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); @@ -2529,6 +2546,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, " }\n"); + nfsd_file_put(file); return 0; } diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 79dd052c7dbf5aae2aea21f883f1d26965c084ce..5e0cde85bd6b040041cd09b9ca79628c7227574e 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -895,7 +895,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, return err; } -int ovl_copy_up_flags(struct dentry *dentry, int flags) +static int ovl_copy_up_flags(struct dentry *dentry, int flags) { int err = 0; const struct cred *old_cred = ovl_override_creds(dentry->d_sb); diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c index 8f4286450f92a5fc0cf2664fdd11ce9bbf47ab43..0e696f72cf651ac6023a5e1356f7ae39ab0a7ea1 100644 --- a/fs/overlayfs/export.c +++ b/fs/overlayfs/export.c @@ -476,7 +476,7 @@ static struct dentry *ovl_lookup_real_inode(struct super_block *sb, if (IS_ERR_OR_NULL(this)) return this; - if (WARN_ON(ovl_dentry_real_at(this, layer->idx) != real)) { + if (ovl_dentry_real_at(this, layer->idx) != real) { dput(this); this = ERR_PTR(-EIO); } diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 01820e654a2192f56e56fd5eee084ff9dc3ed790..0d940e29d62b1ea7ea507b87b7e0e49ae4436e8d 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -33,13 +33,16 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode) return 'm'; } +/* No atime modificaton nor notify on underlying */ +#define OVL_OPEN_FLAGS (O_NOATIME | FMODE_NONOTIFY) + static struct file *ovl_open_realfile(const struct file *file, struct inode *realinode) { struct inode *inode = file_inode(file); struct file *realfile; const struct cred *old_cred; - int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY; + int flags = file->f_flags | OVL_OPEN_FLAGS; int acc_mode = ACC_MODE(flags); int err; 
@@ -72,8 +75,7 @@ static int ovl_change_flags(struct file *file, unsigned int flags) struct inode *inode = file_inode(file); int err; - /* No atime modificaton on underlying */ - flags |= O_NOATIME | FMODE_NONOTIFY; + flags |= OVL_OPEN_FLAGS; /* If some flag changed that cannot be changed then something's amiss */ if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK)) @@ -126,7 +128,7 @@ static int ovl_real_fdget_meta(const struct file *file, struct fd *real, } /* Did the flags change since open? */ - if (unlikely((file->f_flags ^ real->file->f_flags) & ~O_NOATIME)) + if (unlikely((file->f_flags ^ real->file->f_flags) & ~OVL_OPEN_FLAGS)) return ovl_change_flags(real->file, file->f_flags); return 0; diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 3566282a9199cb6e759cf21cf053ddaea3819464..f7d4358db637857629db439617b5138278405bed 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -389,7 +389,7 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected, } static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry, - struct ovl_path **stackp, unsigned int *ctrp) + struct ovl_path **stackp) { struct ovl_fh *fh = ovl_get_fh(upperdentry, OVL_XATTR_ORIGIN); int err; @@ -406,10 +406,6 @@ static int ovl_check_origin(struct ovl_fs *ofs, struct dentry *upperdentry, return err; } - if (WARN_ON(*ctrp)) - return -EIO; - - *ctrp = 1; return 0; } @@ -861,8 +857,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, goto out; } if (upperdentry && !d.is_dir) { - unsigned int origin_ctr = 0; - /* * Lookup copy up origin by decoding origin file handle. * We may get a disconnected dentry, which is fine, @@ -873,8 +867,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, * number - it's the same as if we held a reference * to a dentry in lower layer that was moved under us. 
*/ - err = ovl_check_origin(ofs, upperdentry, &origin_path, - &origin_ctr); + err = ovl_check_origin(ofs, upperdentry, &origin_path); if (err) goto out_put_upper; @@ -1073,6 +1066,10 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, upperredirect = NULL; goto out_free_oe; } + err = ovl_check_metacopy_xattr(upperdentry); + if (err < 0) + goto out_free_oe; + uppermetacopy = err; } if (upperdentry || ctr) { diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index b725c7f15ff49b4e57ae1d925875496a4fd47831..29bc1ec699e7d7a2ef3c4d34534e665855a91de9 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -483,7 +483,6 @@ void ovl_aio_request_cache_destroy(void); /* copy_up.c */ int ovl_copy_up(struct dentry *dentry); int ovl_copy_up_with_data(struct dentry *dentry); -int ovl_copy_up_flags(struct dentry *dentry, int flags); int ovl_maybe_copy_up(struct dentry *dentry, int flags); int ovl_copy_xattr(struct dentry *old, struct dentry *new); int ovl_set_attr(struct dentry *upper, struct kstat *stat); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 91476bc422f9646c070ea06b01cb7ef1706ef057..4b38141c298570a030cf290aa73eafe2e075bb6d 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -580,12 +580,19 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) } } - /* Workdir is useless in non-upper mount */ - if (!config->upperdir && config->workdir) { - pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n", - config->workdir); - kfree(config->workdir); - config->workdir = NULL; + /* Workdir/index are useless in non-upper mount */ + if (!config->upperdir) { + if (config->workdir) { + pr_info("option \"workdir=%s\" is useless in a non-upper mount, ignore\n", + config->workdir); + kfree(config->workdir); + config->workdir = NULL; + } + if (config->index && index_opt) { + pr_info("option \"index=on\" is useless in a non-upper mount, ignore\n"); + index_opt = false; + } + config->index = false; } err = ovl_parse_redirect_mode(config, config->redirect_mode); @@ -622,11 +629,13 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) /* Resolve nfs_export -> index dependency */ if (config->nfs_export && !config->index) { - if (nfs_export_opt && index_opt) { + if (!config->upperdir && config->redirect_follow) { + pr_info("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n"); + config->nfs_export = false; + } else if (nfs_export_opt && index_opt) { pr_err("conflicting options: nfs_export=on,index=off\n"); return -EINVAL; - } - if (index_opt) { + } else if (index_opt) { /* * There was an explicit index=off that resulted * in this conflict. @@ -1352,8 +1361,15 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs, goto out; } + /* index dir will act also as workdir */ + iput(ofs->workdir_trap); + ofs->workdir_trap = NULL; + dput(ofs->workdir); + ofs->workdir = NULL; ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); if (ofs->indexdir) { + ofs->workdir = dget(ofs->indexdir); + err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap, "indexdir"); if (err) @@ -1396,6 +1412,18 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs)) return true; + /* + * We allow using single lower with null uuid for index and nfs_export + * for example to support those features with single lower squashfs. 
+ * To avoid regressions in setups of overlay with re-formatted lower + * squashfs, do not allow decoding origin with lower null uuid unless + * user opted-in to one of the new features that require following the + * lower inode of non-dir upper. + */ + if (!ofs->config.index && !ofs->config.metacopy && !ofs->config.xino && + uuid_is_null(uuid)) + return false; + for (i = 0; i < ofs->numfs; i++) { /* * We use uuid to associate an overlay lower file handle with a @@ -1493,14 +1521,23 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, if (err < 0) goto out; + /* + * Check if lower root conflicts with this overlay layers before + * checking if it is in-use as upperdir/workdir of "another" + * mount, because we do not bother to check in ovl_is_inuse() if + * the upperdir/workdir is in fact in-use by our + * upperdir/workdir. + */ err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); if (err) goto out; if (ovl_is_inuse(stack[i].dentry)) { err = ovl_report_in_use(ofs, "lowerdir"); - if (err) + if (err) { + iput(trap); goto out; + } } mnt = clone_private_mount(&stack[i]); @@ -1575,10 +1612,6 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, if (!ofs->config.upperdir && numlower == 1) { pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n"); return ERR_PTR(-EINVAL); - } else if (!ofs->config.upperdir && ofs->config.nfs_export && - ofs->config.redirect_follow) { - pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n"); - ofs->config.nfs_export = false; } stack = kcalloc(numlower, sizeof(struct path), GFP_KERNEL); @@ -1842,21 +1875,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (!ovl_upper_mnt(ofs)) sb->s_flags |= SB_RDONLY; - if (!(ovl_force_readonly(ofs)) && ofs->config.index) { - /* index dir will act also as workdir */ - dput(ofs->workdir); - ofs->workdir = NULL; - iput(ofs->workdir_trap); - ofs->workdir_trap = NULL; - + if (!ovl_force_readonly(ofs) && ofs->config.index) { err = ovl_get_indexdir(sb, ofs, oe, &upperpath); if (err) goto out_free_oe; /* Force r/o mount with no index dir */ - if (ofs->indexdir) - ofs->workdir = dget(ofs->indexdir); - else + if (!ofs->indexdir) sb->s_flags |= SB_RDONLY; } diff --git a/fs/read_write.c b/fs/read_write.c index bbfa9b12b15eb77fc90e2c66202d42c4cb3825cc..4fb797822567a6b3500f61820d7b76ed22942635 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -419,28 +419,42 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo return ret; } -ssize_t __vfs_read(struct file *file, char __user *buf, size_t count, - loff_t *pos) +ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) { + mm_segment_t old_fs = get_fs(); + ssize_t ret; + + if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ))) + return -EINVAL; + if (!(file->f_mode & FMODE_CAN_READ)) + return -EINVAL; + + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; + set_fs(KERNEL_DS); if (file->f_op->read) - return file->f_op->read(file, buf, count, pos); + ret = file->f_op->read(file, (void __user *)buf, count, pos); else if (file->f_op->read_iter) - return new_sync_read(file, buf, count, pos); + ret = new_sync_read(file, (void __user *)buf, count, pos); else - return -EINVAL; + ret = -EINVAL; + set_fs(old_fs); + if (ret > 0) { + fsnotify_access(file); + add_rchar(current, ret); + } + inc_syscr(current); + return ret; } ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos) { - mm_segment_t 
old_fs; - ssize_t result; + ssize_t ret; - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* The cast to a user pointer is valid due to the set_fs() */ - result = vfs_read(file, (void __user *)buf, count, pos); - set_fs(old_fs); - return result; + ret = rw_verify_area(READ, file, pos, count); + if (ret) + return ret; + return __kernel_read(file, buf, count, pos); } EXPORT_SYMBOL(kernel_read); @@ -456,17 +470,22 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) return -EFAULT; ret = rw_verify_area(READ, file, pos, count); - if (!ret) { - if (count > MAX_RW_COUNT) - count = MAX_RW_COUNT; - ret = __vfs_read(file, buf, count, pos); - if (ret > 0) { - fsnotify_access(file); - add_rchar(current, ret); - } - inc_syscr(current); - } + if (ret) + return ret; + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; + if (file->f_op->read) + ret = file->f_op->read(file, buf, count, pos); + else if (file->f_op->read_iter) + ret = new_sync_read(file, buf, count, pos); + else + ret = -EINVAL; + if (ret > 0) { + fsnotify_access(file); + add_rchar(current, ret); + } + inc_syscr(current); return ret; } @@ -488,23 +507,15 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t return ret; } -static ssize_t __vfs_write(struct file *file, const char __user *p, - size_t count, loff_t *pos) -{ - if (file->f_op->write) - return file->f_op->write(file, p, count, pos); - else if (file->f_op->write_iter) - return new_sync_write(file, p, count, pos); - else - return -EINVAL; -} - +/* caller is responsible for file_start_write/file_end_write */ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos) { mm_segment_t old_fs; const char __user *p; ssize_t ret; + if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE))) + return -EBADF; if (!(file->f_mode & FMODE_CAN_WRITE)) return -EINVAL; @@ -513,7 +524,12 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t p = (__force const char __user *)buf; if (count > MAX_RW_COUNT) count = MAX_RW_COUNT; - ret = __vfs_write(file, p, count, pos); + if (file->f_op->write) + ret = file->f_op->write(file, p, count, pos); + else if (file->f_op->write_iter) + ret = new_sync_write(file, p, count, pos); + else + ret = -EINVAL; set_fs(old_fs); if (ret > 0) { fsnotify_modify(file); @@ -522,21 +538,20 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t inc_syscw(current); return ret; } -EXPORT_SYMBOL(__kernel_write); ssize_t kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos) { - mm_segment_t old_fs; - ssize_t res; + ssize_t ret; - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* The cast to a user pointer is valid due to the set_fs() */ - res = vfs_write(file, (__force const char __user *)buf, count, pos); - set_fs(old_fs); + ret = rw_verify_area(WRITE, file, pos, count); + if (ret) + return ret; - return res; + file_start_write(file); + ret = __kernel_write(file, buf, count, pos); + file_end_write(file); + return ret; } EXPORT_SYMBOL(kernel_write); @@ -552,19 +567,23 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_ return -EFAULT; ret = rw_verify_area(WRITE, file, pos, count); - if (!ret) { - if (count > MAX_RW_COUNT) - count = MAX_RW_COUNT; - file_start_write(file); - ret = __vfs_write(file, buf, count, pos); - if (ret > 0) { - fsnotify_modify(file); - add_wchar(current, ret); - } - inc_syscw(current); - file_end_write(file); + if (ret) + return ret; + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; + 
file_start_write(file); + if (file->f_op->write) + ret = file->f_op->write(file, buf, count, pos); + else if (file->f_op->write_iter) + ret = new_sync_write(file, buf, count, pos); + else + ret = -EINVAL; + if (ret > 0) { + fsnotify_modify(file); + add_wchar(current, ret); } - + inc_syscw(current); + file_end_write(file); return ret; } diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 64f61330564acdb9b38995cfb8f02f2c040435e7..76bb1c846845e33737d428dcc26e4840fd4aeebe 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -175,7 +175,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, /* Extract the length of the metadata block */ data = page_address(bvec->bv_page) + bvec->bv_offset; length = data[offset]; - if (offset <= bvec->bv_len - 1) { + if (offset < bvec->bv_len - 1) { length |= data[offset + 1] << 8; } else { if (WARN_ON_ONCE(!bio_next_segment(bio, &iter_all))) { diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index 07bc42d62673cec1fc16f59932db30ecfd285089..abfb17f88f9a222ac2189de08d872a69b206c6f2 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -607,14 +607,14 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from) int nr_pages; ssize_t ret; - nr_pages = iov_iter_npages(from, BIO_MAX_PAGES); - if (!nr_pages) - return 0; - max = queue_max_zone_append_sectors(bdev_get_queue(bdev)); max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize); iov_iter_truncate(from, max); + nr_pages = iov_iter_npages(from, BIO_MAX_PAGES); + if (!nr_pages) + return 0; + bio = bio_alloc_bioset(GFP_NOFS, nr_pages, &fs_bio_set); if (!bio) return -ENOMEM; @@ -1119,7 +1119,7 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, char *file_name; struct dentry *dir; unsigned int n = 0; - int ret = -ENOMEM; + int ret; /* If the group is empty, there is nothing to do */ if (!zd->nr_zones[type]) @@ -1135,8 +1135,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, zgroup_name = "seq"; dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type); - if (!dir) + if (!dir) { + ret = -ENOMEM; goto free; + } /* * The first zone contains the super block: skip it. @@ -1174,8 +1176,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, * Use the file number within its group as file name. 
*/ snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n); - if (!zonefs_create_inode(dir, file_name, zone, type)) + if (!zonefs_create_inode(dir, file_name, zone, type)) { + ret = -ENOMEM; goto free; + } n++; } diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 8b1e020e9a034117afed69cbc08e2ed2b7fc8fa8..30a3aab312e6cf28c0f0e9601cef713ef4b0bd98 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -456,7 +456,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #if !defined(inb) && !defined(_inb) #define _inb _inb -static inline u16 _inb(unsigned long addr) +static inline u8 _inb(unsigned long addr) { u8 val; @@ -482,7 +482,7 @@ static inline u16 _inw(unsigned long addr) #if !defined(inl) && !defined(_inl) #define _inl _inl -static inline u16 _inl(unsigned long addr) +static inline u32 _inl(unsigned long addr) { u32 val; diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h index 9439ff037b2d1fa213a7448d490db515112e427a..5698fca3bf5608d963a379fa26b9e753297f7d00 100644 --- a/include/asm-generic/mmiowb.h +++ b/include/asm-generic/mmiowb.h @@ -27,7 +27,7 @@ #include DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); -#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state) +#define __mmiowb_state() raw_cpu_ptr(&__mmiowb_state) #else #define __mmiowb_state() arch_mmiowb_state() #endif /* arch_mmiowb_state */ @@ -35,7 +35,9 @@ DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state); static inline void mmiowb_set_pending(void) { struct mmiowb_state *ms = __mmiowb_state(); - ms->mmiowb_pending = ms->nesting_count; + + if (likely(ms->nesting_count)) + ms->mmiowb_pending = ms->nesting_count; } static inline void mmiowb_spin_lock(void) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7d2e56b6fdf895b1958f80fa05462..052e0f05a984170de74aa31d781530bb528a0549 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -341,7 +341,8 @@ #define PAGE_ALIGNED_DATA(page_align) \ . = ALIGN(page_align); \ - *(.data..page_aligned) + *(.data..page_aligned) \ + . = ALIGN(page_align); #define READ_MOSTLY_DATA(align) \ . = ALIGN(align); \ @@ -737,7 +738,9 @@ . = ALIGN(bss_align); \ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \ BSS_FIRST_SECTIONS \ + . = ALIGN(PAGE_SIZE); \ *(.bss..page_aligned) \ + . = ALIGN(PAGE_SIZE); \ *(.dynbss) \ *(BSS_MAIN) \ *(COMMON) \ diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 6c3ef49b46b3ae7357dbfee50b73f4538bc62ed3..e73dea5c733306467fbeed0db11b721be97d34f6 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -865,6 +865,18 @@ struct drm_mode_config { */ bool prefer_shadow_fbdev; + /** + * @fbdev_use_iomem: + * + * Set to true if framebuffer reside in iomem. + * When set to true memcpy_toio() is used when copying the framebuffer in + * drm_fb_helper.drm_fb_helper_dirty_blit_real(). + * + * FIXME: This should be replaced with a per-mapping is_iomem + * flag (like ttm does), and then used everywhere in fbdev code. + */ + bool fbdev_use_iomem; + /** * @quirk_addfb_prefer_xbgr_30bpp: * diff --git a/include/linux/bits.h b/include/linux/bits.h index 4671fbf28842718fa74c9664dee74f8e129dbb7e..7f475d59a0974f17d7a9765c643b798a1dc6ee83 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -18,8 +18,7 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
*/ -#if !defined(__ASSEMBLY__) && \ - (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) +#if !defined(__ASSEMBLY__) #include #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h index 4052d649f36d05d732bd69d8ca784ac2be3b1275..47d5b0c708c98bffcbad9a915725be64b7809139 100644 --- a/include/linux/bpf-netns.h +++ b/include/linux/bpf-netns.h @@ -33,7 +33,7 @@ int netns_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); -int netns_bpf_prog_detach(const union bpf_attr *attr); +int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int netns_bpf_link_create(const union bpf_attr *attr, struct bpf_prog *prog); #else @@ -49,7 +49,8 @@ static inline int netns_bpf_prog_attach(const union bpf_attr *attr, return -EOPNOTSUPP; } -static inline int netns_bpf_prog_detach(const union bpf_attr *attr) +static inline int netns_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) { return -EOPNOTSUPP; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 07052d44bca1c692cb6f360dd614c4c6ce854016..9750a1902ee503814ce83e6b890837b1c673bdc4 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1543,13 +1543,16 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) -int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which); int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); void sock_map_unhash(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); #else static inline int sock_map_prog_update(struct bpf_map *map, - struct bpf_prog *prog, u32 which) + struct bpf_prog *prog, + struct bpf_prog *old, u32 which) { return -EOPNOTSUPP; } @@ -1559,6 +1562,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr, { return -EINVAL; } + +static inline int sock_map_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_STREAM_PARSER */ #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) diff --git a/include/linux/btf.h b/include/linux/btf.h index 5c1ea99b480fa7164766454be9871b7d247d0d40..8b81fbb4497cf156bb977043a84190e0a3ef1690 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -82,6 +82,11 @@ static inline bool btf_type_is_int(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_INT; } +static inline bool btf_type_is_small_int(const struct btf_type *t) +{ + return btf_type_is_int(t) && t->size <= sizeof(u64); +} + static inline bool btf_type_is_enum(const struct btf_type *t) { return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 52661155f85fd935f930c9fe1c3990257c8302a1..fee0b5547cd0a0e32e3b0cc713e93d225c7bda60 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -790,7 +790,9 @@ struct sock_cgroup_data { union { #ifdef __LITTLE_ENDIAN struct { - u8 is_data; + u8 is_data : 1; + u8 no_refcnt : 1; + u8 unused : 6; u8 padding; u16 prioidx; u32 classid; @@ -800,7 +802,9 @@ struct sock_cgroup_data { u32 classid; u16 prioidx; u8 
padding; - u8 is_data; + u8 unused : 6; + u8 no_refcnt : 1; + u8 is_data : 1; } __packed; #endif u64 val; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 4598e4da6b1b72f2d36f5e9fd04acdea13195141..618838c48313cd82a9ce917378a5508396b0dcee 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock; void cgroup_sk_alloc_disable(void); void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_clone(struct sock_cgroup_data *skcd); void cgroup_sk_free(struct sock_cgroup_data *skcd); static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) @@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) */ v = READ_ONCE(skcd->val); - if (v & 1) + if (v & 3) return &cgrp_dfl_root.cgrp; return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; @@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) #else /* CONFIG_CGROUP_DATA */ static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} #endif /* CONFIG_CGROUP_DATA */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 1c74464c80c65320a898a6f359790063eb15c3d7..0b1dc61f3955c545e5fecae15aebf321d61855f4 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -11,7 +11,7 @@ + __GNUC_PATCHLEVEL__) /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ -#if GCC_VERSION < 40800 +#if GCC_VERSION < 40900 # error Sorry, your compiler is too old - please upgrade it. #endif diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index c3bf7710f69acb8425228ef41568ec04a2709f9a..01dd58c74d808a67dc80b3a821f55ed387d170a8 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -252,32 +252,8 @@ struct ftrace_likely_data { * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving * non-scalar types unchanged. */ -#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__) /* - * We build this out of a couple of helper macros in a vain attempt to - * help you keep your lunch down while reading it. - */ -#define __pick_scalar_type(x, type, otherwise) \ - __builtin_choose_expr(__same_type(x, type), (type)0, otherwise) - -/* - * 'char' is not type-compatible with either 'signed char' or 'unsigned char', - * so we include the naked type here as well as the signed/unsigned variants. - */ -#define __pick_integer_type(x, type, otherwise) \ - __pick_scalar_type(x, type, \ - __pick_scalar_type(x, unsigned type, \ - __pick_scalar_type(x, signed type, otherwise))) - -#define __unqual_scalar_typeof(x) typeof( \ - __pick_integer_type(x, char, \ - __pick_integer_type(x, short, \ - __pick_integer_type(x, int, \ - __pick_integer_type(x, long, \ - __pick_integer_type(x, long long, x)))))) -#else -/* - * If supported, prefer C11 _Generic for better compile-times. As above, 'char' + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' * is not type-compatible with 'signed char', and we define a separate case. 
*/ #define __scalar_type_to_expr_cases(type) \ @@ -293,7 +269,6 @@ struct ftrace_likely_data { __scalar_type_to_expr_cases(long), \ __scalar_type_to_expr_cases(long long), \ default: (x))) -#endif /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8750f2dc5613ab838faf72a09db5a724603547c8..73dec4b5d5be1928ff708f2e31ff70dc97ec4255 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -426,6 +426,7 @@ const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); +int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); union map_info *dm_get_rq_mapinfo(struct request *rq); diff --git a/include/linux/device.h b/include/linux/device.h index 15460a5ac024a1559aa91fdee4f7471088c8ac44..5efed864b3871d3f34391fc605ea2bffec79c5a8 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -433,7 +433,8 @@ enum dl_dev_state { * @suppliers: List of links to supplier devices. * @consumers: List of links to consumer devices. * @needs_suppliers: Hook to global list of devices waiting for suppliers. - * @defer_sync: Hook to global list of devices that have deferred sync_state. + * @defer_hook: Hook to global list of devices that have deferred sync_state or + * deferred fw_devlink. * @need_for_probe: If needs_suppliers is on a list, this indicates if the * suppliers are needed for probe or not. * @status: Driver status information. @@ -442,7 +443,7 @@ struct dev_links_info { struct list_head suppliers; struct list_head consumers; struct list_head needs_suppliers; - struct list_head defer_sync; + struct list_head defer_hook; bool need_for_probe; enum dl_dev_state status; }; diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index ab0c156abee6e95e1f37b526467e2efd012c5b5a..a2ca294eaebe7bef322576a9c7519666249cdc87 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -311,6 +311,7 @@ struct dma_buf { void *vmap_ptr; const char *exp_name; const char *name; + spinlock_t name_lock; /* spinlock to protect name access */ struct module *owner; struct list_head list_node; void *priv; diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index cdfa400f89b3d38ad176f0dea420ef9ef7617dc0..ab2e20cba9514be7b37952bafa1c93b4bd37b11d 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -69,6 +69,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size, u64 dma_direct_get_required_mask(struct device *dev); gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, u64 *phys_mask); +bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size); void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, @@ -85,4 +86,5 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); int dma_direct_supported(struct device *dev, u64 mask); +bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); #endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h 
index 78f677cf45ab6937c0e12bde4f1fe21a3f68def2..a33ed3954ed46583a60331abe8f31369d8711536 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -461,6 +461,7 @@ int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); +bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, @@ -571,6 +572,10 @@ static inline size_t dma_max_mapping_size(struct device *dev) { return 0; } +static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) +{ + return false; +} static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; diff --git a/include/linux/efi.h b/include/linux/efi.h index bb35f3305e5506f1721cac2f88854c6f38f97b21..05c47f857383ebf215898cb39c956b7a29d9f86c 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -994,6 +994,7 @@ int efivars_register(struct efivars *efivars, int efivars_unregister(struct efivars *efivars); struct kobject *efivars_kobject(void); +int efivar_supports_writes(void); int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head); diff --git a/include/linux/filter.h b/include/linux/filter.h index 2593777236037afc2247ae39c25e492bf42dc791..0b0144752d780aec77f01a8bac8f055752fe6a12 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -884,12 +884,12 @@ void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); -static inline bool bpf_dump_raw_ok(void) +static inline bool bpf_dump_raw_ok(const struct cred *cred) { /* Reconstruction of call-sites is dependent on kallsyms, * thus make dump the same restriction. 
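/*
 * Editorial note (not part of the patch): the dma-direct.h and dma-mapping.h
 * hunks above export dma_need_sync()/dma_direct_need_sync() so a caller can
 * learn whether dma_sync_single_for_{cpu,device} is actually required for a
 * given mapping. A minimal, hypothetical usage sketch follows; the function
 * and variable names are illustrative only.
 */
#include <linux/dma-mapping.h>

static void rx_buffer_sync_sketch(struct device *dev, dma_addr_t dma, size_t len)
{
	/* Skip the (potentially expensive) sync when the platform says it is a no-op. */
	if (dma_need_sync(dev, dma))
		dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
}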
*/ - return kallsyms_show_value() == 1; + return kallsyms_show_value(cred); } struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, diff --git a/include/linux/fs.h b/include/linux/fs.h index 3f881a892ea7469278ef799f44254a8f809ffd3f..f5abba86107d86d4342ec6788c6573cc69ccbfbd 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -315,6 +315,7 @@ enum rw_hint { #define IOCB_SYNC (1 << 5) #define IOCB_WRITE (1 << 6) #define IOCB_NOWAIT (1 << 7) +#define IOCB_NOIO (1 << 9) struct kiocb { struct file *ki_filp; @@ -1917,7 +1918,6 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, struct iovec *fast_pointer, struct iovec **ret_pointer); -extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_readv(struct file *, const struct iovec __user *, @@ -3033,6 +3033,7 @@ extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, lo extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, enum kernel_read_file_id); extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); +ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos); extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); extern struct file * open_exec(const char *); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 5f24fcbfbfb4467717a339e66c07b6c35e614587..37e1e8f7f08da36ea8d79df4e8125cb7e2bf48da 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -109,6 +109,7 @@ struct fs_context { enum fs_context_phase phase:8; /* The phase the context is in */ bool need_free:1; /* Need to call ops->free() */ bool global:1; /* Goes into &init_user_ns */ + bool oldapi:1; /* Coming from mount(2) */ }; struct fs_context_operations { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index b8b8963f8bb951df304bf8452fe5286df4fffa59..4e7714c88f95de5a443482061af71a84fe43371f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -56,7 +56,7 @@ struct property_entry; * on a bus (or read from them). Apart from two basic transfer functions to * transmit one message at a time, a more complex version can be used to * transmit an arbitrary number of messages without interruption. - * @count must be be less than 64k since msg.len is u16. + * @count must be less than 64k since msg.len is u16. 
*/ int i2c_transfer_buffer_flags(const struct i2c_client *client, char *buf, int count, u16 flags); @@ -1001,7 +1001,7 @@ static inline u32 i2c_acpi_find_bus_speed(struct device *dev) static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index fe15f831841b4da7f2b6f2f7a9722311a7c876a0..9f732499ea88e8bd934288df4d57ca425765e713 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -3333,13 +3333,17 @@ struct ieee80211_multiple_bssid_configuration { #define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) #define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) #define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) +#define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10) #define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) #define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) +#define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13) #define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) #define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) #define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) #define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) #define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18) +#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19) +#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20) #define WLAN_MAX_KEY_LEN 32 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b05e855f1ddd4f2ea1cb0282ad754a7a50f826fe..41a518336673b496faf7ce0ea2a65068fe6814f2 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -25,6 +25,8 @@ #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ +#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ + /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID @@ -577,10 +579,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, int *depth) { - unsigned int vlan_depth = skb->mac_len; + unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; /* if type is 802.1Q/AD then the header should already be * present at mac_len - VLAN_HLEN (if mac_len > 0), or at @@ -595,13 +597,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, vlan_depth = ETH_HLEN; } do { - struct vlan_hdr *vh; + struct vlan_hdr vhdr, *vh; - if (unlikely(!pskb_may_pull(skb, - vlan_depth + VLAN_HLEN))) + vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + if (unlikely(!vh || !--parse_depth)) return 0; - vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } while (eth_type_vlan(type)); @@ -620,11 +621,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
*/ -static inline __be16 vlan_get_protocol(struct sk_buff *skb) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) { return __vlan_get_protocol(skb, skb->protocol, NULL); } +/* A getter for the SKB protocol field which will handle VLAN tags consistently + * whether VLAN acceleration is enabled or not. + */ +static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) +{ + if (!skip_vlan) + /* VLAN acceleration strips the VLAN header from the skb and + * moves it to skb->vlan_proto + */ + return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol; + + return vlan_get_protocol(skb); +} + static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h index 1ecb6b45812c9dfd70d9eb77a48baf5eafef74d9..520858d126808019889e80d1be2a32863900ca64 100644 --- a/include/linux/input/elan-i2c-ids.h +++ b/include/linux/input/elan-i2c-ids.h @@ -67,8 +67,15 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN062B", 0 }, { "ELAN062C", 0 }, { "ELAN062D", 0 }, + { "ELAN062E", 0 }, /* Lenovo V340 Whiskey Lake U */ + { "ELAN062F", 0 }, /* Lenovo V340 Comet Lake U */ { "ELAN0631", 0 }, { "ELAN0632", 0 }, + { "ELAN0633", 0 }, /* Lenovo S145 */ + { "ELAN0634", 0 }, /* Lenovo V340 Ice lake */ + { "ELAN0635", 0 }, /* Lenovo V1415-IIL */ + { "ELAN0636", 0 }, /* Lenovo V1415-Dali */ + { "ELAN0637", 0 }, /* Lenovo V1415-IGLR */ { "ELAN1000", 0 }, { } }; diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 0beaa3eba15516d53d149a89e2a562313ef8d243..c75e4d3d8833fd0f77f0e61ca1014826afdf63bc 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -107,9 +107,12 @@ io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { + iomap->iomem = ioremap_wc(base, size); + if (!iomap->iomem) + return NULL; + iomap->base = base; iomap->size = size; - iomap->iomem = ioremap_wc(base, size); #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 98338dc6b5d275acdc80b2026ee6ac7111cb9f7d..481273f0c72d4256979ee0ba2b02a4b9629f4497 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -18,6 +18,7 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +struct cred; struct module; static inline int is_kernel_inittext(unsigned long addr) @@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* How and when do we show kallsyms values? 
*/ -extern int kallsyms_show_value(void); +extern bool kallsyms_show_value(const struct cred *cred); #else /* !CONFIG_KALLSYMS */ @@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u return -ERANGE; } -static inline int kallsyms_show_value(void) +static inline bool kallsyms_show_value(const struct cred *cred) { return false; } diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 529116b0cabe2879ff95ca027cd8848394795f00..477b8b7c908f8cdbae88139a9b239fbb8def95de 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -176,6 +176,17 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code, char *remcom_out_buffer, struct pt_regs *regs); +/** + * kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML + * packets. + * @remcom_in_buffer: The buffer of the packet we have read. + * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. + */ + +extern void +kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, + char *remcom_out_buffer); + /** * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU * @ignored: This parameter is only here to match the prototype. @@ -314,6 +325,7 @@ extern int kgdb_hex2mem(char *buf, char *mem, int count); extern int kgdb_isremovedbreak(unsigned long addr); extern void kgdb_schedule_breakpoint(void); +extern int kgdb_has_hit_break(unsigned long addr); extern int kgdb_handle_exception(int ex_vector, int signo, int err_code, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 13c0e4556eda941f132b55b3f7f23e6ae58fb9b2..1e6ca716635a902cb1a7056951571e684e102fc6 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -147,6 +147,7 @@ enum { MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, MLX5_REG_MIRC = 0x9162, + MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ca1887dd04231b254f3e91944158382b88104021..1340e02b14ef2fff827688dc51f32fd89533c366 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4381,6 +4381,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, + MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_arm_monitor_counter_in_bits { @@ -9960,6 +9961,34 @@ struct mlx5_ifc_pptb_reg_bits { u8 untagged_buff[0x4]; }; +struct mlx5_ifc_sbcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + u8 sb_access_reg_cap_mask[4][0x20]; + + u8 reserved_at_c0[0x80]; + + u8 sb_feature_cap_mask[4][0x20]; + + u8 reserved_at_1c0[0x40]; + + u8 cap_total_buffer_size[0x20]; + + u8 cap_cell_size[0x10]; + u8 cap_max_pg_buffers[0x8]; + u8 cap_num_pool_supported[0x8]; + + u8 reserved_at_240[0x8]; + u8 cap_sbsr_stat_size[0x8]; + u8 cap_max_tclass_data[0x8]; + u8 cap_max_cpu_ingress_tclass_sb[0x8]; +}; + struct mlx5_ifc_pbmc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 8d764aab29de9b729ff8209a6206c3d323830197..e14cbe444afcceb5bf54e910eaf452dd009968f7 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -318,7 +318,7 @@ struct pcmcia_device_id { #define INPUT_DEVICE_ID_LED_MAX 0x0f #define INPUT_DEVICE_ID_SND_MAX 0x07 #define INPUT_DEVICE_ID_FF_MAX 0x7f -#define INPUT_DEVICE_ID_SW_MAX 0x0f +#define 
INPUT_DEVICE_ID_SW_MAX 0x10 #define INPUT_DEVICE_ID_PROP_MAX 0x1f #define INPUT_DEVICE_ID_MATCH_BUS 1 diff --git a/include/linux/random.h b/include/linux/random.h index 45e1f8fa742bcdaec079e7e64999ece155f01385..9ab7443bd91bdddcc3199f71749016b2df70b3ea 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -119,6 +120,8 @@ struct rnd_state { __u32 s1, s2, s3, s4; }; +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + u32 prandom_u32_state(struct rnd_state *state); void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 70ebef866cc82ada0c6c1f69cc2a57e23cb732ba..68dab3e08aadfebcf1fac6a3a0a379294c550997 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -33,7 +33,7 @@ * of two or more hash tables when the rhashtable is being resized. * The end of the chain is marked with a special nulls marks which has * the least significant bit set but otherwise stores the address of - * the hash bucket. This allows us to be be sure we've found the end + * the hash bucket. This allows us to be sure we've found the end * of the right list. * The value stored in the hash bucket has BIT(0) used as a lock bit. * This bit must be atomically set before any changes are made to @@ -84,7 +84,7 @@ struct bucket_table { struct lockdep_map dep_map; - struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp; + struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /* @@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) -static inline struct rhash_lock_head *const *rht_bucket( +static inline struct rhash_lock_head __rcu *const *rht_bucket( const struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_var( +static inline struct rhash_lock_head __rcu **rht_bucket_var( struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_insert( +static inline struct rhash_lock_head __rcu **rht_bucket_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? 
rht_bucket_nested_insert(ht, tbl, hash) : @@ -325,7 +324,7 @@ static inline struct rhash_lock_head **rht_bucket_insert( */ static inline void rht_lock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { local_bh_disable(); bit_spin_lock(0, (unsigned long *)bkt); @@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl, } static inline void rht_lock_nested(struct bucket_table *tbl, - struct rhash_lock_head **bucket, + struct rhash_lock_head __rcu **bucket, unsigned int subclass) { local_bh_disable(); @@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl, } static inline void rht_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { lock_map_release(&tbl->dep_map); bit_spin_unlock(0, (unsigned long *)bkt); local_bh_enable(); } -static inline struct rhash_head __rcu *__rht_ptr( - struct rhash_lock_head *const *bkt) +static inline struct rhash_head *__rht_ptr( + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) { - return (struct rhash_head __rcu *) - ((unsigned long)*bkt & ~BIT(0) ?: + return (struct rhash_head *) + ((unsigned long)p & ~BIT(0) ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } @@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr( * access is guaranteed, such as when destroying the table. */ static inline struct rhash_head *rht_ptr_rcu( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - return rcu_dereference(p); + return __rht_ptr(rcu_dereference(*bkt), bkt); } static inline struct rhash_head *rht_ptr( - struct rhash_lock_head *const *bkt, + struct rhash_lock_head __rcu *const *bkt, struct bucket_table *tbl, unsigned int hash) { - return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); } static inline struct rhash_head *rht_ptr_exclusive( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - return rcu_dereference_protected(__rht_ptr(bkt), 1); + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); } -static inline void rht_assign_locked(struct rhash_lock_head **bkt, +static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; - rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0))); + rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0))); } static inline void rht_assign_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; lock_map_release(&tbl->dep_map); - rcu_assign_pointer(*p, obj); + rcu_assign_pointer(*bkt, (void *)obj); preempt_enable(); __release(bitlock); local_bh_enable(); @@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - struct rhash_lock_head *const *bkt; + struct rhash_lock_head __rcu *const *bkt; struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast( .ht = ht, .key = key, }; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct bucket_table *tbl; struct rhash_head *head; @@ -995,7 +988,7 @@ 
static inline int __rhashtable_remove_fast_one( struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; @@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast( struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 4f922afb607ac01d4122dc3641faa367884faaa6..45cf7b69d8521ddb7bdae96e5dd0ff01abb239c5 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -155,7 +155,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, * Loop over each sg element in the given sg_table object. */ #define for_each_sgtable_sg(sgt, sg, i) \ - for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) + for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i) /* * Loop over each sg element in the given *DMA mapped* sg_table object. @@ -163,7 +163,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, * of the each element. */ #define for_each_sgtable_dma_sg(sgt, sg, i) \ - for_each_sg(sgt->sgl, sg, sgt->nents, i) + for_each_sg((sgt)->sgl, sg, (sgt)->nents, i) /** * sg_chain - Chain two sglists together @@ -451,7 +451,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit. */ #define for_each_sgtable_page(sgt, piter, pgoffset) \ - for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset) + for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset) /** * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object @@ -465,7 +465,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) * unit. 
*/ #define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \ - for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset) + for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset) /* diff --git a/include/linux/sched.h b/include/linux/sched.h index 692e327d7455c0860b1cb7766dae0758dc428baf..6833729430932656d3a0f8523e69071947c75c33 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -114,10 +114,6 @@ struct task_group; #define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ - (task->state & TASK_NOLOAD) == 0) - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP /* diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 9fd550e7946a5889f8d94fa1a2c116e0041df031..791f4844efeb9d8d88cd71b74670b96b816c722d 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -462,10 +462,104 @@ extern void uart_handle_cts_change(struct uart_port *uport, extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); -extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch); -extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch); -extern void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags); -extern int uart_handle_break(struct uart_port *port); +#ifdef CONFIG_MAGIC_SYSRQ_SERIAL +#define SYSRQ_TIMEOUT (HZ * 5) + +bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch); + +static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (!port->sysrq) + return 0; + + if (ch && time_before(jiffies, port->sysrq)) { + if (sysrq_mask()) { + handle_sysrq(ch); + port->sysrq = 0; + return 1; + } + if (uart_try_toggle_sysrq(port, ch)) + return 1; + } + port->sysrq = 0; + + return 0; +} + +static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (!port->sysrq) + return 0; + + if (ch && time_before(jiffies, port->sysrq)) { + if (sysrq_mask()) { + port->sysrq_ch = ch; + port->sysrq = 0; + return 1; + } + if (uart_try_toggle_sysrq(port, ch)) + return 1; + } + port->sysrq = 0; + + return 0; +} + +static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + int sysrq_ch; + + if (!port->has_sysrq) { + spin_unlock_irqrestore(&port->lock, irqflags); + return; + } + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, irqflags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} +#else /* CONFIG_MAGIC_SYSRQ_SERIAL */ +static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) +{ + return 0; +} +static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) +{ + return 0; +} +static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + spin_unlock_irqrestore(&port->lock, irqflags); +} +#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ + +/* + * We do the SysRQ and SAK checking like this... 
+ */ +static inline int uart_handle_break(struct uart_port *port) +{ + struct uart_state *state = port->state; + + if (port->handle_break) + port->handle_break(port); + +#ifdef CONFIG_MAGIC_SYSRQ_SERIAL + if (port->has_sysrq && uart_console(port)) { + if (!port->sysrq) { + port->sysrq = jiffies + SYSRQ_TIMEOUT; + return 1; + } + port->sysrq = 0; + } +#endif + if (port->flags & UPF_SAK) + do_SAK(state->port.tty); + return 0; +} /* * UART_ENABLE_MS - determine if port should enable modem status irqs diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 08674cd14d5a5581d383daeb9c745ed61ff58e1d..1e9ed840b9fc101087c860dd6879de95afb950d3 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -430,6 +430,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog, bpf_prog_put(prog); } +static inline int psock_replace_prog(struct bpf_prog **pprog, + struct bpf_prog *prog, + struct bpf_prog *old) +{ + if (cmpxchg(pprog, old, prog) != old) + return -ENOENT; + + if (old) + bpf_prog_put(old); + + return 0; +} + static inline void psock_progs_drop(struct sk_psock_progs *progs) { psock_set_prog(&progs->msg_parser, NULL); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9aac824c523cf051535f0a9015ab17cea6645789..a1bbaa1c1a3a3ad5ba64faac2a6700e1c2b96e59 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -220,7 +220,9 @@ struct tcp_sock { } rack; u16 advmss; /* Advertised MSS */ u8 compressed_ack; - u8 dup_ack_counter; + u8 dup_ack_counter:2, + tlp_retrans:1, /* TLP is a retransmission */ + unused:5; u32 chrono_start; /* Start time in jiffies of a TCP chrono */ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ @@ -243,7 +245,7 @@ struct tcp_sock { save_syn:1, /* Save headers of SYN packet */ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */ syn_smc:1; /* SYN includes SMC */ - u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. 
*/ + u32 tlp_high_seq; /* snd_nxt at the time of TLP */ u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 47eaa34f87619c11b150590f6bbb1f252d806eea..c5afaf8ca7a2965a1bc3ae363d72c6006e77be05 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -15,6 +15,7 @@ #include #include #include +#include #include struct inode; @@ -94,7 +95,7 @@ static inline void simple_xattrs_free(struct simple_xattrs *xattrs) list_for_each_entry_safe(xattr, node, &xattrs->head, list) { kfree(xattr->name); - kfree(xattr); + kvfree(xattr); } } diff --git a/include/net/addrconf.h b/include/net/addrconf.h index fdb07105384caeafa4f1e104b588efbe87791ed4..8418b7d38468a1bf2e159c705dab12c5a64cbd0c 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -274,6 +274,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_ac_close(struct sock *sk); void ipv6_sock_ac_close(struct sock *sk); int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/include/net/devlink.h b/include/net/devlink.h index 1df6dfec26c2e8c9357ff589f76a607012ade891..95b0322a2a824a9b33c86b8b9115e5b463894499 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -718,6 +718,7 @@ enum devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_PIM, DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB, DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY, + DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY, DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT, DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL, @@ -915,6 +916,8 @@ enum devlink_trap_group_generic_id { "uc_loopback" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \ "local_delivery" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \ + "external_delivery" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \ "ipv6" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \ diff --git a/include/net/dst.h b/include/net/dst.h index 07adfacd8088ac473f2b4a8d368312a6560e71a0..852d8fb36ab72390c5f417eaff9f9580eac69429 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -400,7 +400,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, struct sk_buff *skb) { - struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); + struct neighbour *n = NULL; + + /* The packets from tunnel devices (eg bareudp) may have only + * metadata in the dst pointer of skb. Hence a pointer check of + * neigh_lookup is needed. + */ + if (dst->ops->neigh_lookup) + n = dst->ops->neigh_lookup(dst, skb, NULL); + return IS_ERR(n) ? 
NULL : n; } diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index a7eba43fe4e4cab9deb228bbbff0f9dd6064ab5f..4b6e36288ddd3b356a262bae4d04912f4493c354 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -372,7 +372,8 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control, } #ifdef CONFIG_BPF_SYSCALL -int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog); +int flow_dissector_bpf_prog_attach_check(struct net *net, + struct bpf_prog *prog); #endif /* CONFIG_BPF_SYSCALL */ #endif diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 6315324b9dc2c4d7cd4c979fe08208a547811ca6..3eaf25f68b7957072209692487d818747331a176 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -5,7 +5,6 @@ #include #include #include -#include struct flow_match { struct flow_dissector *dissector; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 74950663bb00d2be53d3cfb82bcb23ffc35e5324..6e5f1e1aa82267f53d7b4486fe85bfd204758a5e 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -35,13 +35,6 @@ struct genl_info; * do additional, common, filtering and return an error * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks - * @mcast_bind: a socket bound to the given multicast group (which - * is given as the offset into the groups array) - * @mcast_unbind: a socket was unbound from the given multicast group. - * Note that unbind() will not be called symmetrically if the - * generic netlink family is removed while there are still open - * sockets. - * @attrbuf: buffer to store parsed attributes (private) * @mcgrps: multicast groups used by this family * @n_mcgrps: number of multicast groups * @mcgrp_offset: starting number of multicast group IDs in this family @@ -64,9 +57,6 @@ struct genl_family { void (*post_doit)(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); - int (*mcast_bind)(struct net *net, int group); - void (*mcast_unbind)(struct net *net, int group); - struct nlattr ** attrbuf; /* private */ const struct genl_ops * ops; const struct genl_multicast_group *mcgrps; unsigned int n_ops; diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 0f0d1efe06ddcd1bcd67000bde1d7b88f0c74153..e1eaf17802889dbb8143ae9b9523de4614708ed5 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -172,7 +173,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) static inline int INET_ECN_set_ce(struct sk_buff *skb) { - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): if (skb_network_header(skb) + sizeof(struct iphdr) <= skb_tail_pointer(skb)) @@ -191,7 +192,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) static inline int INET_ECN_set_ect1(struct sk_buff *skb) { - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): if (skb_network_header(skb) + sizeof(struct iphdr) <= skb_tail_pointer(skb)) @@ -272,12 +273,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph, { __u8 inner; - if (skb->protocol == htons(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): inner = ip_hdr(skb)->tos; - else if (skb->protocol == htons(ETH_P_IPV6)) + break; + case htons(ETH_P_IPV6): inner = ipv6_get_dsfield(ipv6_hdr(skb)); - else + break; + default: return 0; + } 
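/*
 * Editorial note (not part of the patch): the if_vlan.h and inet_ecn.h hunks
 * above replace direct skb->protocol checks with the new skb_protocol()
 * helper, so VLAN-accelerated packets (where the tag has been moved to
 * skb->vlan_proto) are classified the same way as packets that still carry
 * the tag in-line. A hypothetical caller could follow the same pattern:
 */
#include <linux/if_vlan.h>

static bool is_ipv6_payload_sketch(const struct sk_buff *skb)
{
	/* skip_vlan = true walks nested 802.1Q/802.1ad headers (bounded by VLAN_MAX_DEPTH) */
	return skb_protocol(skb, true) == htons(ETH_P_IPV6);
}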
return INET_ECN_decapsulate(skb, oiph->tos, inner); } @@ -287,12 +292,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h, { __u8 inner; - if (skb->protocol == htons(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): inner = ip_hdr(skb)->tos; - else if (skb->protocol == htons(ETH_P_IPV6)) + break; + case htons(ETH_P_IPV6): inner = ipv6_get_dsfield(ipv6_hdr(skb)); - else + break; + default: return 0; + } return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner); } diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 076e5d7db7d3c45663d094aeef1010eeca461635..36025dea7612ac462a2582883a6ea1024c141081 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -290,6 +290,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p, __u32 fwmark); void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); +extern const struct header_ops ip_tunnel_header_ops; +__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); + struct ip_tunnel_encap_ops { size_t (*encap_hlen)(struct ip_tunnel_encap *e); int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, diff --git a/include/net/netns/bpf.h b/include/net/netns/bpf.h index a8dce2a380c8e85e6c32bbe4244f2bac302e68de..0ca6a1b8718533adf2f37223bf4d8955df151246 100644 --- a/include/net/netns/bpf.h +++ b/include/net/netns/bpf.h @@ -9,10 +9,13 @@ #include struct bpf_prog; +struct bpf_prog_array; struct netns_bpf { - struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE]; - struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE]; + /* Array of programs to run compiled from progs or links */ + struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE]; + struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE]; + struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE]; }; #endif /* __NETNS_BPF_H__ */ diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 9092e697059e775af307be69a879386ebfd9924f..ac8c890a2657e35546ec51fe8b8a993a2bd0c91b 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -136,17 +136,6 @@ static inline void qdisc_run(struct Qdisc *q) } } -static inline __be16 tc_skb_protocol(const struct sk_buff *skb) -{ - /* We need to take extra care in case the skb came via - * vlan accelerated path. In that case, use skb->vlan_proto - * as the original vlan header was already stripped. - */ - if (skb_vlan_tag_present(skb)) - return skb->vlan_proto; - return skb->protocol; -} - /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. */ diff --git a/include/net/sock.h b/include/net/sock.h index 3428619faae4340485b200f49d9cce4fb09086b3..1183507df95bfdad4fb159efd50a80e7e2190f9a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -533,7 +533,8 @@ enum sk_pacing { * be copied. 
*/ #define SK_USER_DATA_NOCOPY 1UL -#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY) +#define SK_USER_DATA_BPF 2UL /* Managed by BPF */ +#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF) /** * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c7d213c9f9d8da4e97619bdbc3b8f61cd83d7d22..51f65d23ebafa4be3eea2f416e482714989fee2b 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -941,7 +941,7 @@ struct xfrm_dst { static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; return xdst->path; @@ -953,7 +953,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; return xdst->child; } @@ -1630,13 +1630,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, void *); void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, + const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx, int delete, int *err); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8, - int dir, u32 id, int delete, int *err); +struct xfrm_policy *xfrm_policy_byid(struct net *net, + const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, + int *err); int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); void xfrm_policy_hash_rebuild(struct net *net); u32 xfrm_get_acqseq(void); diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index a4ff226505c99c06fc6df0e3b0253179e0eff5e0..6842990e2712bd194b60739c995855c2e21dab0f 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -40,7 +40,7 @@ struct xsk_buff_pool { u32 headroom; u32 chunk_size; u32 frame_len; - bool cheap_dma; + bool dma_need_sync; bool unaligned; void *addrs; struct device *dev; @@ -80,7 +80,7 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb) void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb); static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb) { - if (xskb->pool->cheap_dma) + if (!xskb->pool->dma_need_sync) return; xp_dma_sync_for_cpu_slow(xskb); @@ -91,7 +91,7 @@ void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma, static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool, dma_addr_t dma, size_t size) { - if (pool->cheap_dma) + if (!pool->dma_need_sync) return; xp_dma_sync_for_device_slow(pool, dma, size); diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index c4369a6c295108f1e3642e1782d5b9295f3f4553..2f1fc23602cb6e0116dfa8414b72c00026f00a1a 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -305,6 +305,25 @@ struct rvt_rq { spinlock_t lock ____cacheline_aligned_in_smp; }; +/** + * rvt_get_rq_count - count numbers of request work queue entries + * in circular buffer + * @rq: data structure for request queue entry + * 
@head: head indices of the circular buffer + * @tail: tail indices of the circular buffer + * + * Return - total number of entries in the Receive Queue + */ + +static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail) +{ + u32 count = head - tail; + + if ((s32)count < 0) + count += rq->size; + return count; +} + /* * This structure holds the information that the send tasklet needs * to send a RDMA read response or atomic operation. diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index 6ce8effa0b128ffe0464e137bb343f36b9c48874..70cbc5095e7250aeb1fca7f4529516422d45cb2a 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -66,6 +66,7 @@ struct snd_compr_runtime { * @direction: stream direction, playback/recording * @metadata_set: metadata set flag, true when set * @next_track: has userspace signal next track transition, true when set + * @partial_drain: undergoing partial_drain for stream, true when set * @private_data: pointer to DSP private data * @dma_buffer: allocated buffer if any */ @@ -78,6 +79,7 @@ struct snd_compr_stream { enum snd_compr_direction direction; bool metadata_set; bool next_track; + bool partial_drain; void *private_data; struct snd_dma_buffer dma_buffer; }; @@ -182,7 +184,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) if (snd_BUG_ON(!stream)) return; - stream->runtime->state = SNDRV_PCM_STATE_SETUP; + /* for partial_drain case we are back to running state on success */ + if (stream->partial_drain) { + stream->runtime->state = SNDRV_PCM_STATE_RUNNING; + stream->partial_drain = false; /* clear this flag as well */ + } else { + stream->runtime->state = SNDRV_PCM_STATE_SETUP; + } wake_up(&stream->runtime->sleep); } diff --git a/include/sound/rt5670.h b/include/sound/rt5670.h index f9024c7a1600fb803efd039fe6ffabf06ee137d4..02e1d777835495b1d1726bc9eed10715df6c0b36 100644 --- a/include/sound/rt5670.h +++ b/include/sound/rt5670.h @@ -12,6 +12,7 @@ struct rt5670_platform_data { int jd_mode; bool in2_diff; bool dev_gpio; + bool gpio1_is_ext_spk_en; bool dmic_en; unsigned int dmic1_data_pin; diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 212257e84facbe1efb36272a74ffe960220c6b9b..71e178c897932863c535f00bafb0d2111b6f9784 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -161,6 +161,7 @@ void snd_soc_dai_resume(struct snd_soc_dai *dai); int snd_soc_dai_compress_new(struct snd_soc_dai *dai, struct snd_soc_pcm_runtime *rtd, int num); bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream); +void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link); void snd_soc_dai_action(struct snd_soc_dai *dai, int stream, int action); static inline void snd_soc_dai_activate(struct snd_soc_dai *dai, diff --git a/include/sound/soc.h b/include/sound/soc.h index 2756f9bcac3e487bd9994e9b9bf88ae169c4cfe6..3ce7f0f5aa929e5216ce2dae4b087b506dfcc38f 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -444,6 +444,8 @@ int devm_snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *component_driver, struct snd_soc_dai_driver *dai_drv, int num_dai); void snd_soc_unregister_component(struct device *dev); +void snd_soc_unregister_component_by_driver(struct device *dev, + const struct snd_soc_component_driver *component_driver); struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name); struct snd_soc_component *snd_soc_lookup_component(struct device 
*dev, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 974a71342aea681d330bff9074810d3c73f125e5..8bd33050b7bbb6e0c5e1d0bf2e1e7ffc11b2caa4 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3171,13 +3171,12 @@ union bpf_attr { * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of - * new data availability is sent. - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of - * new data availability is sent unconditionally. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. * Return - * 0, on success; - * < 0, on error. + * 0 on success, or a negative error in case of failure. * * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) * Description @@ -3189,20 +3188,20 @@ union bpf_attr { * void bpf_ringbuf_submit(void *data, u64 flags) * Description * Submit reserved ring buffer sample, pointed to by *data*. - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of - * new data availability is sent. - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of - * new data availability is sent unconditionally. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. * Return * Nothing. Always succeeds. * * void bpf_ringbuf_discard(void *data, u64 flags) * Description * Discard reserved ring buffer sample, pointed to by *data*. - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of - * new data availability is sent. - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of - * new data availability is sent unconditionally. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. * Return * Nothing. Always succeeds. * @@ -3210,16 +3209,18 @@ union bpf_attr { * Description * Query various characteristics of provided ring buffer. What * exactly is queries is determined by *flags*: - * - BPF_RB_AVAIL_DATA - amount of data not yet consumed; - * - BPF_RB_RING_SIZE - the size of ring buffer; - * - BPF_RB_CONS_POS - consumer position (can wrap around); - * - BPF_RB_PROD_POS - producer(s) position (can wrap around); - * Data returned is just a momentary snapshots of actual values + * + * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. + * * **BPF_RB_RING_SIZE**: The size of ring buffer. + * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). + * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). + * + * Data returned is just a momentary snapshot of actual values * and could be inaccurate, so this facility should be used to * power heuristics and for reporting, not to make 100% correct * calculation. * Return - * Requested value, or 0, if flags are not recognized. + * Requested value, or 0, if *flags* are not recognized. 
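/*
 * Editorial note (not part of the patch): the helper documentation cleaned up
 * above describes the BPF ring buffer API and its wakeup flags. A minimal,
 * hypothetical BPF-side sketch using those helpers follows; the map name,
 * section name and payload are illustrative only.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 20);	/* ring size: a page-aligned power of two */
} events SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int ringbuf_sketch(void *ctx)
{
	__u64 *slot;

	slot = bpf_ringbuf_reserve(&events, sizeof(*slot), 0);
	if (!slot)
		return 0;
	*slot = bpf_ktime_get_ns();
	/* BPF_RB_NO_WAKEUP: submit without forcing a data-availability notification */
	bpf_ringbuf_submit(slot, BPF_RB_NO_WAKEUP);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";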
* * int bpf_csum_level(struct sk_buff *skb, u64 level) * Description diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 1f412fbf561bb2262553c2000e6cf938fad23543..e103c1434e4b0ea8861a97a0753a54d63b5dbd57 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -110,9 +110,12 @@ struct dsa_hw_desc { uint16_t rsvd1; union { uint8_t expected_res; + /* create delta record */ struct { uint64_t delta_addr; uint32_t max_delta_size; + uint32_t delt_rsvd; + uint8_t expected_res_mask; }; uint32_t delta_rec_size; uint64_t dest2; diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index b6a835d37826384e87feb1446809e983379bda1f..0c2e27d28e0acdf825bab42cbffb452275f065ed 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -888,7 +888,8 @@ #define SW_LINEIN_INSERT 0x0d /* set = inserted */ #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ #define SW_PEN_INSERTED 0x0f /* set = pen inserted */ -#define SW_MAX 0x0f +#define SW_MACHINE_COVER 0x10 /* set = cover closed */ +#define SW_MAX 0x10 #define SW_CNT (SW_MAX+1) /* diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 92c22699a5a74e44111926657e0c65ac74f135a2..7843742b8b741e98118ed79fee7ad947dc750826 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -197,6 +197,7 @@ struct io_sqring_offsets { * sq_ring->flags */ #define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */ +#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */ struct io_cqring_offsets { __u32 head; diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h index 9cec58a6a5eaa7cfbb5eb28e00ffe19fc63b4032..f79d7abe27dba9cb2af258075d62b1ecad1ab5d4 100644 --- a/include/uapi/linux/vboxguest.h +++ b/include/uapi/linux/vboxguest.h @@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20); /* IOCTL to perform a VMM Device request larger then 1KB. */ -#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) +#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3) /** VBG_IOCTL_HGCM_CONNECT data structure. */ @@ -198,7 +198,7 @@ struct vbg_ioctl_log { } u; }; -#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) +#define VBG_IOCTL_LOG(s) _IO('V', 9) /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. 
*/ diff --git a/kernel/audit.c b/kernel/audit.c index 8c201f414226246f7f6c173faa721423aabff51f..b2301bdc977369e7ad75c60a6d9d22b1021b8bd3 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1851,7 +1851,6 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, } audit_get_stamp(ab->ctx, &t, &serial); - audit_clear_dummy(ab->ctx); audit_log_format(ab, "audit(%llu.%03lu:%u): ", (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial); diff --git a/kernel/audit.h b/kernel/audit.h index f0233dc40b1730a4229fcc4d7f82ce00f76f8145..ddc22878433d014303c17be97a26dc68442a5212 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -290,13 +290,6 @@ extern int audit_signal_info_syscall(struct task_struct *t); extern void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx); extern struct list_head *audit_killed_trees(void); - -static inline void audit_clear_dummy(struct audit_context *ctx) -{ - if (ctx) - ctx->dummy = 0; -} - #else /* CONFIG_AUDITSYSCALL */ #define auditsc_get_stamp(c, t, s) 0 #define audit_put_watch(w) {} @@ -330,7 +323,6 @@ static inline int audit_signal_info_syscall(struct task_struct *t) } #define audit_filter_inodes(t, c) AUDIT_DISABLED -#define audit_clear_dummy(c) {} #endif /* CONFIG_AUDITSYSCALL */ extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 468a2339045784c9260c2d80a1335b1090b3280a..fd840c40abf7ac4791d2fa724302ebe1241a67b1 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1417,6 +1417,9 @@ static void audit_log_proctitle(void) struct audit_context *context = audit_context(); struct audit_buffer *ab; + if (!context || context->dummy) + return; + ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); if (!ab) return; /* audit_panic or being filtered */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 58c9af1d480820332a332685790027cdc99fcc52..0443600146dc27edd385e5c7dd594b087605a012 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3746,7 +3746,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, return false; t = btf_type_skip_modifiers(btf, t->type, NULL); - if (!btf_type_is_int(t)) { + if (!btf_type_is_small_int(t)) { bpf_log(log, "ret type %s not allowed for fmod_ret\n", btf_kind_str[BTF_INFO_KIND(t->info)]); @@ -3768,7 +3768,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, /* skip modifiers */ while (btf_type_is_modifier(t)) t = btf_type_by_id(btf, t->type); - if (btf_type_is_int(t) || btf_type_is_enum(t)) + if (btf_type_is_small_int(t) || btf_type_is_enum(t)) /* accessing a scalar */ return true; if (!btf_type_is_ptr(t)) { @@ -4058,6 +4058,11 @@ static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, const char *tname, *sym; u32 btf_id, i; + if (!btf_vmlinux) { + bpf_log(log, "btf_vmlinux doesn't exist\n"); + return -EINVAL; + } + if (IS_ERR(btf_vmlinux)) { bpf_log(log, "btf_vmlinux is malformed\n"); return -EINVAL; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b4b288a3c3c915dea56febe4ba8b3867c5d95ec4..b32cc8ce8ff6fc4e4f8ec7f5b4f154c45588843c 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head) htab_elem_free(htab, l); } -static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) { struct bpf_map *map = &htab->map; + void *ptr; if (map->ops->map_fd_put_ptr) { - void *ptr = 
fd_htab_map_get_ptr(map, l); - + ptr = fd_htab_map_get_ptr(map, l); map->ops->map_fd_put_ptr(ptr); } +} + +static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +{ + htab_put_fd_value(htab, l); if (htab_is_prealloc(htab)) { __pcpu_freelist_push(&htab->freelist, &l->fnode); @@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; + htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 78cf061f817938560117a7a6bbed007b9660810e..310241ca79912a62007e83accfe6307cc2e8e01a 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -19,18 +19,21 @@ struct bpf_netns_link { * with netns_bpf_mutex held. */ struct net *net; + struct list_head node; /* node in list of links attached to net */ }; /* Protects updates to netns_bpf */ DEFINE_MUTEX(netns_bpf_mutex); /* Must be called with netns_bpf_mutex held. */ -static void __net_exit bpf_netns_link_auto_detach(struct bpf_link *link) +static void netns_bpf_run_array_detach(struct net *net, + enum netns_bpf_attach_type type) { - struct bpf_netns_link *net_link = - container_of(link, struct bpf_netns_link, link); + struct bpf_prog_array *run_array; - net_link->net = NULL; + run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL, + lockdep_is_held(&netns_bpf_mutex)); + bpf_prog_array_free(run_array); } static void bpf_netns_link_release(struct bpf_link *link) @@ -40,22 +43,18 @@ static void bpf_netns_link_release(struct bpf_link *link) enum netns_bpf_attach_type type = net_link->netns_type; struct net *net; - /* Link auto-detached by dying netns. */ - if (!net_link->net) - return; - mutex_lock(&netns_bpf_mutex); - /* Recheck after potential sleep. We can race with cleanup_net - * here, but if we see a non-NULL struct net pointer pre_exit - * has not happened yet and will block on netns_bpf_mutex. + /* We can race with cleanup_net, but if we see a non-NULL + * struct net pointer, pre_exit has not run yet and wait for + * netns_bpf_mutex. */ net = net_link->net; if (!net) goto out_unlock; - net->bpf.links[type] = NULL; - RCU_INIT_POINTER(net->bpf.progs[type], NULL); + netns_bpf_run_array_detach(net, type); + list_del(&net_link->node); out_unlock: mutex_unlock(&netns_bpf_mutex); @@ -76,6 +75,7 @@ static int bpf_netns_link_update_prog(struct bpf_link *link, struct bpf_netns_link *net_link = container_of(link, struct bpf_netns_link, link); enum netns_bpf_attach_type type = net_link->netns_type; + struct bpf_prog_array *run_array; struct net *net; int ret = 0; @@ -93,8 +93,11 @@ static int bpf_netns_link_update_prog(struct bpf_link *link, goto out_unlock; } + run_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + WRITE_ONCE(run_array->items[0].prog, new_prog); + old_prog = xchg(&link->prog, new_prog); - rcu_assign_pointer(net->bpf.progs[type], new_prog); bpf_prog_put(old_prog); out_unlock: @@ -142,14 +145,38 @@ static const struct bpf_link_ops bpf_netns_link_ops = { .show_fdinfo = bpf_netns_link_show_fdinfo, }; +/* Must be called with netns_bpf_mutex held. 
*/ +static int __netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr, + struct net *net, + enum netns_bpf_attach_type type) +{ + __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + struct bpf_prog_array *run_array; + u32 prog_cnt = 0, flags = 0; + + run_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + if (run_array) + prog_cnt = bpf_prog_array_length(run_array); + + if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) + return -EFAULT; + if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) + return -EFAULT; + if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) + return 0; + + return bpf_prog_array_copy_to_user(run_array, prog_ids, + attr->query.prog_cnt); +} + int netns_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { - __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); - u32 prog_id, prog_cnt = 0, flags = 0; enum netns_bpf_attach_type type; - struct bpf_prog *attached; struct net *net; + int ret; if (attr->query.query_flags) return -EINVAL; @@ -162,36 +189,25 @@ int netns_bpf_prog_query(const union bpf_attr *attr, if (IS_ERR(net)) return PTR_ERR(net); - rcu_read_lock(); - attached = rcu_dereference(net->bpf.progs[type]); - if (attached) { - prog_cnt = 1; - prog_id = attached->aux->id; - } - rcu_read_unlock(); + mutex_lock(&netns_bpf_mutex); + ret = __netns_bpf_prog_query(attr, uattr, net, type); + mutex_unlock(&netns_bpf_mutex); put_net(net); - - if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) - return -EFAULT; - if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt))) - return -EFAULT; - - if (!attr->query.prog_cnt || !prog_ids || !prog_cnt) - return 0; - - if (copy_to_user(prog_ids, &prog_id, sizeof(u32))) - return -EFAULT; - - return 0; + return ret; } int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) { + struct bpf_prog_array *run_array; enum netns_bpf_attach_type type; + struct bpf_prog *attached; struct net *net; int ret; + if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd) + return -EINVAL; + type = to_netns_bpf_attach_type(attr->attach_type); if (type < 0) return -EINVAL; @@ -200,19 +216,47 @@ int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) mutex_lock(&netns_bpf_mutex); /* Attaching prog directly is not compatible with links */ - if (net->bpf.links[type]) { + if (!list_empty(&net->bpf.links[type])) { ret = -EEXIST; goto out_unlock; } switch (type) { case NETNS_BPF_FLOW_DISSECTOR: - ret = flow_dissector_bpf_prog_attach(net, prog); + ret = flow_dissector_bpf_prog_attach_check(net, prog); break; default: ret = -EINVAL; break; } + if (ret) + goto out_unlock; + + attached = net->bpf.progs[type]; + if (attached == prog) { + /* The same program cannot be attached twice */ + ret = -EINVAL; + goto out_unlock; + } + + run_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + if (run_array) { + WRITE_ONCE(run_array->items[0].prog, prog); + } else { + run_array = bpf_prog_array_alloc(1, GFP_KERNEL); + if (!run_array) { + ret = -ENOMEM; + goto out_unlock; + } + run_array->items[0].prog = prog; + rcu_assign_pointer(net->bpf.run_array[type], run_array); + } + + net->bpf.progs[type] = prog; + if (attached) + bpf_prog_put(attached); + out_unlock: mutex_unlock(&netns_bpf_mutex); @@ -221,63 +265,74 @@ int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) /* Must 
be called with netns_bpf_mutex held. */ static int __netns_bpf_prog_detach(struct net *net, - enum netns_bpf_attach_type type) + enum netns_bpf_attach_type type, + struct bpf_prog *old) { struct bpf_prog *attached; /* Progs attached via links cannot be detached */ - if (net->bpf.links[type]) + if (!list_empty(&net->bpf.links[type])) return -EINVAL; - attached = rcu_dereference_protected(net->bpf.progs[type], - lockdep_is_held(&netns_bpf_mutex)); - if (!attached) + attached = net->bpf.progs[type]; + if (!attached || attached != old) return -ENOENT; - RCU_INIT_POINTER(net->bpf.progs[type], NULL); + netns_bpf_run_array_detach(net, type); + net->bpf.progs[type] = NULL; bpf_prog_put(attached); return 0; } -int netns_bpf_prog_detach(const union bpf_attr *attr) +int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) { enum netns_bpf_attach_type type; + struct bpf_prog *prog; int ret; + if (attr->target_fd) + return -EINVAL; + type = to_netns_bpf_attach_type(attr->attach_type); if (type < 0) return -EINVAL; + prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); + if (IS_ERR(prog)) + return PTR_ERR(prog); + mutex_lock(&netns_bpf_mutex); - ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type); + ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog); mutex_unlock(&netns_bpf_mutex); + bpf_prog_put(prog); + return ret; } static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, enum netns_bpf_attach_type type) { - struct bpf_prog *prog; + struct bpf_netns_link *net_link = + container_of(link, struct bpf_netns_link, link); + struct bpf_prog_array *run_array; int err; mutex_lock(&netns_bpf_mutex); /* Allow attaching only one prog or link for now */ - if (net->bpf.links[type]) { + if (!list_empty(&net->bpf.links[type])) { err = -E2BIG; goto out_unlock; } /* Links are not compatible with attaching prog directly */ - prog = rcu_dereference_protected(net->bpf.progs[type], - lockdep_is_held(&netns_bpf_mutex)); - if (prog) { + if (net->bpf.progs[type]) { err = -EEXIST; goto out_unlock; } switch (type) { case NETNS_BPF_FLOW_DISSECTOR: - err = flow_dissector_bpf_prog_attach(net, link->prog); + err = flow_dissector_bpf_prog_attach_check(net, link->prog); break; default: err = -EINVAL; @@ -286,7 +341,15 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, if (err) goto out_unlock; - net->bpf.links[type] = link; + run_array = bpf_prog_array_alloc(1, GFP_KERNEL); + if (!run_array) { + err = -ENOMEM; + goto out_unlock; + } + run_array->items[0].prog = link->prog; + rcu_assign_pointer(net->bpf.run_array[type], run_array); + + list_add_tail(&net_link->node, &net->bpf.links[type]); out_unlock: mutex_unlock(&netns_bpf_mutex); @@ -345,23 +408,34 @@ int netns_bpf_link_create(const union bpf_attr *attr, struct bpf_prog *prog) return err; } +static int __net_init netns_bpf_pernet_init(struct net *net) +{ + int type; + + for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) + INIT_LIST_HEAD(&net->bpf.links[type]); + + return 0; +} + static void __net_exit netns_bpf_pernet_pre_exit(struct net *net) { enum netns_bpf_attach_type type; - struct bpf_link *link; + struct bpf_netns_link *net_link; mutex_lock(&netns_bpf_mutex); for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) { - link = net->bpf.links[type]; - if (link) - bpf_netns_link_auto_detach(link); - else - __netns_bpf_prog_detach(net, type); + netns_bpf_run_array_detach(net, type); + list_for_each_entry(net_link, &net->bpf.links[type], node) + net_link->net = NULL; /* auto-detach 
link */ + if (net->bpf.progs[type]) + bpf_prog_put(net->bpf.progs[type]); } mutex_unlock(&netns_bpf_mutex); } static struct pernet_operations netns_bpf_pernet_ops __net_initdata = { + .init = netns_bpf_pernet_init, .pre_exit = netns_bpf_pernet_pre_exit, }; diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 21cde24386db4b73ed4388f8cb3c183e57a58160..cae9d505e04ace141bfdc64df04a4caabcc62052 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -20,11 +20,14 @@ static struct reuseport_array *reuseport_array(struct bpf_map *map) /* The caller must hold the reuseport_lock */ void bpf_sk_reuseport_detach(struct sock *sk) { - struct sock __rcu **socks; + uintptr_t sk_user_data; write_lock_bh(&sk->sk_callback_lock); - socks = sk->sk_user_data; - if (socks) { + sk_user_data = (uintptr_t)sk->sk_user_data; + if (sk_user_data & SK_USER_DATA_BPF) { + struct sock __rcu **socks; + + socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK); WRITE_ONCE(sk->sk_user_data, NULL); /* * Do not move this NULL assignment outside of @@ -252,6 +255,7 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, struct sock *free_osk = NULL, *osk, *nsk; struct sock_reuseport *reuse; u32 index = *(u32 *)key; + uintptr_t sk_user_data; struct socket *socket; int err, fd; @@ -305,7 +309,9 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, if (err) goto put_file_unlock; - WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]); + sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY | + SK_USER_DATA_BPF; + WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data); rcu_assign_pointer(array->ptrs[index], nsk); free_osk = osk; err = 0; diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 180414bb0d3e9a94d26225f15370b139d0d7689f..0af88bbc1c15307d3964acd8810453f7c0276208 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -132,15 +132,6 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node) { struct bpf_ringbuf *rb; - if (!data_sz || !PAGE_ALIGNED(data_sz)) - return ERR_PTR(-EINVAL); - -#ifdef CONFIG_64BIT - /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */ - if (data_sz > RINGBUF_MAX_DATA_SZ) - return ERR_PTR(-E2BIG); -#endif - rb = bpf_ringbuf_area_alloc(data_sz, numa_node); if (!rb) return ERR_PTR(-ENOMEM); @@ -166,9 +157,16 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr) return ERR_PTR(-EINVAL); if (attr->key_size || attr->value_size || - attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries)) + !is_power_of_2(attr->max_entries) || + !PAGE_ALIGNED(attr->max_entries)) return ERR_PTR(-EINVAL); +#ifdef CONFIG_64BIT + /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */ + if (attr->max_entries > RINGBUF_MAX_DATA_SZ) + return ERR_PTR(-E2BIG); +#endif + rb_map = kzalloc(sizeof(*rb_map), GFP_USER); if (!rb_map) return ERR_PTR(-ENOMEM); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8da159936bab17e4b322561457ca03f8608dffd3..0fd80ac81f705725a3ebc3234eadd4f7dae4d7cc 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2121,7 +2121,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) !bpf_capable()) return -EPERM; - if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN)) + if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (is_perfmon_prog_type(type) && !perfmon_capable()) return -EPERM; @@ -2893,13 +2893,11 @@ static int 
bpf_prog_detach(const union bpf_attr *attr) switch (ptype) { case BPF_PROG_TYPE_SK_MSG: case BPF_PROG_TYPE_SK_SKB: - return sock_map_get_from_fd(attr, NULL); + return sock_map_prog_detach(attr, ptype); case BPF_PROG_TYPE_LIRC_MODE2: return lirc_prog_detach(attr); case BPF_PROG_TYPE_FLOW_DISSECTOR: - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - return netns_bpf_prog_detach(attr); + return netns_bpf_prog_detach(attr, ptype); case BPF_PROG_TYPE_CGROUP_DEVICE: case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: @@ -3139,7 +3137,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, return NULL; } -static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) +static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, + const struct cred *f_cred) { const struct bpf_map *map; struct bpf_insn *insns; @@ -3165,7 +3164,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) code == (BPF_JMP | BPF_CALL_ARGS)) { if (code == (BPF_JMP | BPF_CALL_ARGS)) insns[i].code = BPF_JMP | BPF_CALL; - if (!bpf_dump_raw_ok()) + if (!bpf_dump_raw_ok(f_cred)) insns[i].imm = 0; continue; } @@ -3221,7 +3220,8 @@ static int set_info_rec_size(struct bpf_prog_info *info) return 0; } -static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, +static int bpf_prog_get_info_by_fd(struct file *file, + struct bpf_prog *prog, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -3290,11 +3290,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, struct bpf_insn *insns_sanitized; bool fault; - if (prog->blinded && !bpf_dump_raw_ok()) { + if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { info.xlated_prog_insns = 0; goto done; } - insns_sanitized = bpf_insn_prepare_dump(prog); + insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); if (!insns_sanitized) return -ENOMEM; uinsns = u64_to_user_ptr(info.xlated_prog_insns); @@ -3328,7 +3328,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, } if (info.jited_prog_len && ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { uinsns = u64_to_user_ptr(info.jited_prog_insns); ulen = min_t(u32, info.jited_prog_len, ulen); @@ -3363,7 +3363,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, ulen = info.nr_jited_ksyms; info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; if (ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { unsigned long ksym_addr; u64 __user *user_ksyms; u32 i; @@ -3394,7 +3394,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, ulen = info.nr_jited_func_lens; info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; if (ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { u32 __user *user_lens; u32 func_len, i; @@ -3451,7 +3451,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, else info.nr_jited_line_info = 0; if (info.nr_jited_line_info && ulen) { - if (bpf_dump_raw_ok()) { + if (bpf_dump_raw_ok(file->f_cred)) { __u64 __user *user_linfo; u32 i; @@ -3497,7 +3497,8 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, return 0; } -static int bpf_map_get_info_by_fd(struct bpf_map *map, +static int bpf_map_get_info_by_fd(struct file *file, + struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -3540,7 +3541,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map, return 0; } -static int bpf_btf_get_info_by_fd(struct btf *btf, +static int bpf_btf_get_info_by_fd(struct file *file, + struct btf *btf, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -3555,7 +3557,8 @@ static int bpf_btf_get_info_by_fd(struct btf *btf, return btf_get_info_by_fd(btf, attr, uattr); } -static int bpf_link_get_info_by_fd(struct bpf_link *link, +static int bpf_link_get_info_by_fd(struct file *file, + struct bpf_link *link, const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -3608,15 +3611,15 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, return -EBADFD; if (f.file->f_op == &bpf_prog_fops) - err = bpf_prog_get_info_by_fd(f.file->private_data, attr, + err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else if (f.file->f_op == &bpf_map_fops) - err = bpf_map_get_info_by_fd(f.file->private_data, attr, + err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else if (f.file->f_op == &btf_fops) - err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr); + err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else if (f.file->f_op == &bpf_link_fops) - err = bpf_link_get_info_by_fd(f.file->private_data, + err = bpf_link_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else err = -EINVAL; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 34cde841ab6819ebc61f992b38a61b66228ce8fb..94cead5a43e57e949e71d177594afca431468c49 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -399,8 +399,7 @@ static bool reg_type_not_null(enum bpf_reg_type type) return type == PTR_TO_SOCKET || type == PTR_TO_TCP_SOCK || type == PTR_TO_MAP_VALUE || - type == PTR_TO_SOCK_COMMON || - type == PTR_TO_BTF_ID; + type == PTR_TO_SOCK_COMMON; } static bool reg_type_may_be_null(enum bpf_reg_type type) @@ -9801,7 +9800,7 @@ static int jit_subprogs(struct bpf_verifier_env *env) int i, j, subprog_start, subprog_end = 0, len, subprog; struct bpf_insn *insn; void *old_bpf_func; - int err; + int err, num_exentries; if (env->subprog_cnt <= 1) return 0; @@ -9876,6 +9875,14 @@ static int jit_subprogs(struct bpf_verifier_env *env) func[i]->aux->nr_linfo = prog->aux->nr_linfo; func[i]->aux->jited_linfo = prog->aux->jited_linfo; func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; + num_exentries = 0; + insn = func[i]->insnsi; + for (j = 0; j < func[i]->len; j++, insn++) { + if (BPF_CLASS(insn->code) == BPF_LDX && + BPF_MODE(insn->code) == BPF_PROBE_MEM) + num_exentries++; + } + func[i]->aux->num_exentries = num_exentries; func[i] = bpf_int_jit_compile(func[i]); if (!func[i]->jited) { err = -ENOTSUPP; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 1ea181a58465fd16889a2f18cbc3b6be48d3d956..dd247747ec14a622d47037e66e5b43dfbd4e6f24 100644 
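The bpf_dump_raw_ok() checks above now take the struct file of the queried fd, so the decision is made against file->f_cred, i.e. the credentials recorded when that fd was opened, not against whoever later issues the query. For reference, a userspace sketch of the affected query (demo_get_prog_info() is a hypothetical helper, not part of this patch); when the recorded credentials do not allow raw dumps, the xlated/jited instruction buffers are omitted or sanitized:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int demo_get_prog_info(int prog_fd, struct bpf_prog_info *info)
{
	union bpf_attr attr;

	/* caller should zero *info first; its fields double as request parameters */
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(*info);
	attr.info.info = (__u64)(unsigned long)info;

	return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
}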
--- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -6439,18 +6439,8 @@ void cgroup_sk_alloc_disable(void) void cgroup_sk_alloc(struct sock_cgroup_data *skcd) { - if (cgroup_sk_alloc_disabled) - return; - - /* Socket clone path */ - if (skcd->val) { - /* - * We might be cloning a socket which is left in an empty - * cgroup and the cgroup might have already been rmdir'd. - * Don't use cgroup_get_live(). - */ - cgroup_get(sock_cgroup_ptr(skcd)); - cgroup_bpf_get(sock_cgroup_ptr(skcd)); + if (cgroup_sk_alloc_disabled) { + skcd->no_refcnt = 1; return; } @@ -6475,10 +6465,27 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd) rcu_read_unlock(); } +void cgroup_sk_clone(struct sock_cgroup_data *skcd) +{ + if (skcd->val) { + if (skcd->no_refcnt) + return; + /* + * We might be cloning a socket which is left in an empty + * cgroup and the cgroup might have already been rmdir'd. + * Don't use cgroup_get_live(). + */ + cgroup_get(sock_cgroup_ptr(skcd)); + cgroup_bpf_get(sock_cgroup_ptr(skcd)); + } +} + void cgroup_sk_free(struct sock_cgroup_data *skcd) { struct cgroup *cgrp = sock_cgroup_ptr(skcd); + if (skcd->no_refcnt) + return; cgroup_bpf_put(cgrp); cgroup_put(cgrp); } diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c index 61774aec46b4c8ee90f60daa384db9f7a9625d60..a790026e42d01006d8c9107e0a6cb303ba818023 100644 --- a/kernel/debug/gdbstub.c +++ b/kernel/debug/gdbstub.c @@ -792,6 +792,19 @@ static void gdb_cmd_query(struct kgdb_state *ks) } break; #endif +#ifdef CONFIG_HAVE_ARCH_KGDB_QXFER_PKT + case 'S': + if (!strncmp(remcom_in_buffer, "qSupported:", 11)) + strcpy(remcom_out_buffer, kgdb_arch_gdb_stub_feature); + break; + case 'X': + if (!strncmp(remcom_in_buffer, "qXfer:", 6)) + kgdb_arch_handle_qxfer_pkt(remcom_in_buffer, + remcom_out_buffer); + break; +#endif + default: + break; } } diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c index 93f578a8e613ba7d4d5cde3f62d94943ce679526..67f060b86a73fa0ee8be935ef6dc4e60b9247f2a 100644 --- a/kernel/dma/direct.c +++ b/kernel/dma/direct.c @@ -70,7 +70,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, return 0; } -static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) +bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) { return phys_to_dma_direct(dev, phys) + size - 1 <= min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); @@ -539,3 +539,9 @@ size_t dma_direct_max_mapping_size(struct device *dev) return swiotlb_max_mapping_size(dev); return SIZE_MAX; } + +bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr) +{ + return !dev_is_dma_coherent(dev) || + is_swiotlb_buffer(dma_to_phys(dev, dma_addr)); +} diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 98e3d873792ea4cf7bb5f84416dfd492f5da792d..a8c18c9a796fdc6525d4cf006fe56efd0f99fb81 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -397,6 +397,16 @@ size_t dma_max_mapping_size(struct device *dev) } EXPORT_SYMBOL_GPL(dma_max_mapping_size); +bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) +{ + const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_is_direct(ops)) + return dma_direct_need_sync(dev, dma_addr); + return ops->sync_single_for_cpu || ops->sync_single_for_device; +} +EXPORT_SYMBOL_GPL(dma_need_sync); + unsigned long dma_get_merge_boundary(struct device *dev) { const struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index 
8cfa01243ed27b6fa3d7ce5718fc69b2d6a55cf1..6bc74a2d51273e3d32c841844f8f0f8f2eac079d 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -69,12 +68,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, do { pool_size = 1 << (PAGE_SHIFT + order); - - if (dev_get_cma_area(NULL)) - page = dma_alloc_from_contiguous(NULL, 1 << order, - order, false); - else - page = alloc_pages(gfp, order); + page = alloc_pages(gfp, order); } while (!page && order-- > 0); if (!page) goto out; @@ -118,8 +112,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, dma_common_free_remap(addr, pool_size); #endif free_page: __maybe_unused - if (!dma_release_from_contiguous(NULL, page, 1 << order)) - __free_pages(page, order); + __free_pages(page, order); out: return ret; } @@ -203,7 +196,7 @@ static int __init dma_atomic_pool_init(void) } postcore_initcall(dma_atomic_pool_init); -static inline struct gen_pool *dev_to_pool(struct device *dev) +static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev) { u64 phys_mask; gfp_t gfp; @@ -217,47 +210,79 @@ static inline struct gen_pool *dev_to_pool(struct device *dev) return atomic_pool_kernel; } -static bool dma_in_atomic_pool(struct device *dev, void *start, size_t size) +static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool) +{ + if (bad_pool == atomic_pool_kernel) + return atomic_pool_dma32 ? : atomic_pool_dma; + + if (bad_pool == atomic_pool_dma32) + return atomic_pool_dma; + + return NULL; +} + +static inline struct gen_pool *dma_guess_pool(struct device *dev, + struct gen_pool *bad_pool) { - struct gen_pool *pool = dev_to_pool(dev); + if (bad_pool) + return dma_get_safer_pool(bad_pool); - if (unlikely(!pool)) - return false; - return gen_pool_has_addr(pool, (unsigned long)start, size); + return dma_guess_pool_from_device(dev); } void *dma_alloc_from_pool(struct device *dev, size_t size, struct page **ret_page, gfp_t flags) { - struct gen_pool *pool = dev_to_pool(dev); - unsigned long val; + struct gen_pool *pool = NULL; + unsigned long val = 0; void *ptr = NULL; - - if (!pool) { - WARN(1, "%pGg atomic pool not initialised!\n", &flags); - return NULL; + phys_addr_t phys; + + while (1) { + pool = dma_guess_pool(dev, pool); + if (!pool) { + WARN(1, "Failed to get suitable pool for %s\n", + dev_name(dev)); + break; + } + + val = gen_pool_alloc(pool, size); + if (!val) + continue; + + phys = gen_pool_virt_to_phys(pool, val); + if (dma_coherent_ok(dev, phys, size)) + break; + + gen_pool_free(pool, val, size); + val = 0; } - val = gen_pool_alloc(pool, size); - if (val) { - phys_addr_t phys = gen_pool_virt_to_phys(pool, val); + if (val) { *ret_page = pfn_to_page(__phys_to_pfn(phys)); ptr = (void *)val; memset(ptr, 0, size); + + if (gen_pool_avail(pool) < atomic_pool_size) + schedule_work(&atomic_pool_work); } - if (gen_pool_avail(pool) < atomic_pool_size) - schedule_work(&atomic_pool_work); return ptr; } bool dma_free_from_pool(struct device *dev, void *start, size_t size) { - struct gen_pool *pool = dev_to_pool(dev); + struct gen_pool *pool = NULL; + + while (1) { + pool = dma_guess_pool(dev, pool); + if (!pool) + return false; - if (!dma_in_atomic_pool(dev, start, size)) - return false; - gen_pool_free(pool, (unsigned long)start, size); - return true; + if (gen_pool_has_addr(pool, (unsigned long)start, size)) { + gen_pool_free(pool, (unsigned long)start, size); + return true; + } + } } diff --git 
a/kernel/events/uprobes.c b/kernel/events/uprobes.c index bb0862873dbaa1e65c0602aa65c4a00492dd95b4..5f8b0c52fd2ef671502b8dc268e5fe1831230be6 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -2199,7 +2199,7 @@ static void handle_swbp(struct pt_regs *regs) if (!uprobe) { if (is_swbp > 0) { /* No matching uprobe; signal SIGTRAP. */ - send_sig(SIGTRAP, current, 0); + force_sig(SIGTRAP); } else { /* * Either we raced with uprobe_unregister() or we can't diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 76191116843879405269061b9b437a3d0762fea4..2a9fec53e15910124dc8190c13ad4bfde3baca63 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -195,9 +195,9 @@ void irq_set_thread_affinity(struct irq_desc *desc) set_bit(IRQTF_AFFINITY, &action->thread_flags); } +#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static void irq_validate_effective_affinity(struct irq_data *data) { -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK const struct cpumask *m = irq_data_get_effective_affinity_mask(data); struct irq_chip *chip = irq_data_get_irq_chip(data); @@ -205,9 +205,19 @@ static void irq_validate_effective_affinity(struct irq_data *data) return; pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", chip->name, data->irq); -#endif } +static inline void irq_init_effective_affinity(struct irq_data *data, + const struct cpumask *mask) +{ + cpumask_copy(irq_data_get_effective_affinity_mask(data), mask); +} +#else +static inline void irq_validate_effective_affinity(struct irq_data *data) { } +static inline void irq_init_effective_affinity(struct irq_data *data, + const struct cpumask *mask) { } +#endif + int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -304,6 +314,26 @@ static int irq_try_set_affinity(struct irq_data *data, return ret; } +static bool irq_set_affinity_deactivated(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + /* + * If the interrupt is not yet activated, just store the affinity + * mask and do not call the chip driver at all. On activation the + * driver has to make sure anyway that the interrupt is in a + * useable state so startup works. + */ + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data)) + return false; + + cpumask_copy(desc->irq_common_data.affinity, mask); + irq_init_effective_affinity(data, mask); + irqd_set(data, IRQD_AFFINITY_SET); + return true; +} + int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -314,6 +344,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, if (!chip || !chip->irq_set_affinity) return -EINVAL; + if (irq_set_affinity_deactivated(data, mask, force)) + return 0; + if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) { ret = irq_try_set_affinity(data, mask, force); } else { diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 16c8c605f4b0facfc751198a8450cc91405e2f1c..bb14e64f62a48eddc2f5ca1ede0e156fde55af15 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -644,19 +644,20 @@ static inline int kallsyms_for_perf(void) * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to * block even that). 
*/ -int kallsyms_show_value(void) +bool kallsyms_show_value(const struct cred *cred) { switch (kptr_restrict) { case 0: if (kallsyms_for_perf()) - return 1; + return true; /* fallthrough */ case 1: - if (has_capability_noaudit(current, CAP_SYSLOG)) - return 1; + if (security_capable(cred, &init_user_ns, CAP_SYSLOG, + CAP_OPT_NOAUDIT) == 0) + return true; /* fallthrough */ default: - return 0; + return false; } } @@ -673,7 +674,11 @@ static int kallsyms_open(struct inode *inode, struct file *file) return -ENOMEM; reset_iter(iter, 0); - iter->show_value = kallsyms_show_value(); + /* + * Instead of checking this on every s_show() call, cache + * the result here at open time. + */ + iter->show_value = kallsyms_show_value(file->f_cred); return 0; } diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 4a904cc56d68f922dbf807fb54f9593905a6f7f0..2e97febeef77dfa96ee0565ef7cf097ef17bef86 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2448,7 +2448,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p, else kprobe_type = "k"; - if (!kallsyms_show_value()) + if (!kallsyms_show_value(pi->file->f_cred)) addr = NULL; if (sym) @@ -2540,7 +2540,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) * If /proc/kallsyms is not showing kernel address, we won't * show them here either. */ - if (!kallsyms_show_value()) + if (!kallsyms_show_value(m->file->f_cred)) seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, (void *)ent->start_addr); else diff --git a/kernel/module.c b/kernel/module.c index bee1c25ca5c5ec218554799e9764c544c56e9353..aa183c9ac0a256eadb92f6d30f2d2460583bfec8 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1510,8 +1510,7 @@ static inline bool sect_empty(const Elf_Shdr *sect) } struct module_sect_attr { - struct module_attribute mattr; - char *name; + struct bin_attribute battr; unsigned long address; }; @@ -1521,13 +1520,18 @@ struct module_sect_attrs { struct module_sect_attr attrs[]; }; -static ssize_t module_sect_show(struct module_attribute *mattr, - struct module_kobject *mk, char *buf) +static ssize_t module_sect_read(struct file *file, struct kobject *kobj, + struct bin_attribute *battr, + char *buf, loff_t pos, size_t count) { struct module_sect_attr *sattr = - container_of(mattr, struct module_sect_attr, mattr); - return sprintf(buf, "0x%px\n", kptr_restrict < 2 ? - (void *)sattr->address : NULL); + container_of(battr, struct module_sect_attr, battr); + + if (pos != 0) + return -EINVAL; + + return sprintf(buf, "0x%px\n", + kallsyms_show_value(file->f_cred) ? 
(void *)sattr->address : NULL); } static void free_sect_attrs(struct module_sect_attrs *sect_attrs) @@ -1535,7 +1539,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs) unsigned int section; for (section = 0; section < sect_attrs->nsections; section++) - kfree(sect_attrs->attrs[section].name); + kfree(sect_attrs->attrs[section].battr.attr.name); kfree(sect_attrs); } @@ -1544,42 +1548,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info) unsigned int nloaded = 0, i, size[2]; struct module_sect_attrs *sect_attrs; struct module_sect_attr *sattr; - struct attribute **gattr; + struct bin_attribute **gattr; /* Count loaded sections and allocate structures */ for (i = 0; i < info->hdr->e_shnum; i++) if (!sect_empty(&info->sechdrs[i])) nloaded++; size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded), - sizeof(sect_attrs->grp.attrs[0])); - size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]); + sizeof(sect_attrs->grp.bin_attrs[0])); + size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]); sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); if (sect_attrs == NULL) return; /* Setup section attributes. */ sect_attrs->grp.name = "sections"; - sect_attrs->grp.attrs = (void *)sect_attrs + size[0]; + sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0]; sect_attrs->nsections = 0; sattr = §_attrs->attrs[0]; - gattr = §_attrs->grp.attrs[0]; + gattr = §_attrs->grp.bin_attrs[0]; for (i = 0; i < info->hdr->e_shnum; i++) { Elf_Shdr *sec = &info->sechdrs[i]; if (sect_empty(sec)) continue; + sysfs_bin_attr_init(&sattr->battr); sattr->address = sec->sh_addr; - sattr->name = kstrdup(info->secstrings + sec->sh_name, - GFP_KERNEL); - if (sattr->name == NULL) + sattr->battr.attr.name = + kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); + if (sattr->battr.attr.name == NULL) goto out; sect_attrs->nsections++; - sysfs_attr_init(&sattr->mattr.attr); - sattr->mattr.show = module_sect_show; - sattr->mattr.store = NULL; - sattr->mattr.attr.name = sattr->name; - sattr->mattr.attr.mode = S_IRUSR; - *(gattr++) = &(sattr++)->mattr.attr; + sattr->battr.read = module_sect_read; + sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4); + sattr->battr.attr.mode = 0400; + *(gattr++) = &(sattr++)->battr; } *gattr = NULL; @@ -1669,7 +1672,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info) continue; if (info->sechdrs[i].sh_type == SHT_NOTE) { sysfs_bin_attr_init(nattr); - nattr->attr.name = mod->sect_attrs->attrs[loaded].name; + nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name; nattr->attr.mode = S_IRUGO; nattr->size = info->sechdrs[i].sh_size; nattr->private = (void *) info->sechdrs[i].sh_addr; @@ -4379,7 +4382,7 @@ static int modules_open(struct inode *inode, struct file *file) if (!err) { struct seq_file *m = file->private_data; - m->private = kallsyms_show_value() ? NULL : (void *)8ul; + m->private = kallsyms_show_value(file->f_cred) ? 
NULL : (void *)8ul; } return err; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ca5db40392d4106679c42f99b1d23f58b7545dae..2142c67676826d3ee50bdf0aa5d478810c6b8d27 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1311,9 +1311,6 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) void activate_task(struct rq *rq, struct task_struct *p, int flags) { - if (task_contributes_to_load(p)) - rq->nr_uninterruptible--; - enqueue_task(rq, p, flags); p->on_rq = TASK_ON_RQ_QUEUED; @@ -1323,9 +1320,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) { p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; - if (task_contributes_to_load(p)) - rq->nr_uninterruptible++; - dequeue_task(rq, p, flags); } @@ -2236,10 +2230,10 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, lockdep_assert_held(&rq->lock); -#ifdef CONFIG_SMP if (p->sched_contributes_to_load) rq->nr_uninterruptible--; +#ifdef CONFIG_SMP if (wake_flags & WF_MIGRATED) en_flags |= ENQUEUE_MIGRATED; #endif @@ -2583,7 +2577,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). */ smp_rmb(); - if (p->on_rq && ttwu_remote(p, wake_flags)) + if (READ_ONCE(p->on_rq) && ttwu_remote(p, wake_flags)) goto unlock; if (p->in_iowait) { @@ -2592,9 +2586,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) } #ifdef CONFIG_SMP - p->sched_contributes_to_load = !!task_contributes_to_load(p); - p->state = TASK_WAKING; - /* * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be * possible to, falsely, observe p->on_cpu == 0. @@ -2613,8 +2604,20 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in * __schedule(). See the comment for smp_mb__after_spinlock(). + * + * Form a control-dep-acquire with p->on_rq == 0 above, to ensure + * schedule()'s deactivate_task() has 'happened' and p will no longer + * care about it's own p->state. See the comment in __schedule(). */ - smp_rmb(); + smp_acquire__after_ctrl_dep(); + + /* + * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq + * == 0), which means we need to do an enqueue, change p->state to + * TASK_WAKING such that we can unlock p->pi_lock before doing the + * enqueue, such as ttwu_queue_wakelist(). + */ + p->state = TASK_WAKING; /* * If the owning (remote) CPU is still in the middle of schedule() with @@ -2962,6 +2965,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) * Silence PROVE_RCU. */ raw_spin_lock_irqsave(&p->pi_lock, flags); + rseq_migrate(p); /* * We're setting the CPU for the first time, we don't migrate, * so use __set_task_cpu(). @@ -3026,6 +3030,7 @@ void wake_up_new_task(struct task_struct *p) * as we're not fully set-up yet. 
*/ p->recent_used_cpu = task_cpu(p); + rseq_migrate(p); __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); #endif rq = __task_rq_lock(p, &rf); @@ -4097,6 +4102,7 @@ static void __sched notrace __schedule(bool preempt) { struct task_struct *prev, *next; unsigned long *switch_count; + unsigned long prev_state; struct rq_flags rf; struct rq *rq; int cpu; @@ -4116,9 +4122,16 @@ static void __sched notrace __schedule(bool preempt) /* * Make sure that signal_pending_state()->signal_pending() below * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) - * done by the caller to avoid the race with signal_wake_up(). + * done by the caller to avoid the race with signal_wake_up(): + * + * __set_current_state(@state) signal_wake_up() + * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) + * wake_up_state(p, state) + * LOCK rq->lock LOCK p->pi_state + * smp_mb__after_spinlock() smp_mb__after_spinlock() + * if (signal_pending_state()) if (p->state & @state) * - * The membarrier system call requires a full memory barrier + * Also, the membarrier system call requires a full memory barrier * after coming from user-space, before storing to rq->curr. */ rq_lock(rq, &rf); @@ -4129,10 +4142,38 @@ static void __sched notrace __schedule(bool preempt) update_rq_clock(rq); switch_count = &prev->nivcsw; - if (!preempt && prev->state) { - if (signal_pending_state(prev->state, prev)) { + + /* + * We must load prev->state once (task_struct::state is volatile), such + * that: + * + * - we form a control dependency vs deactivate_task() below. + * - ptrace_{,un}freeze_traced() can change ->state underneath us. + */ + prev_state = prev->state; + if (!preempt && prev_state) { + if (signal_pending_state(prev_state, prev)) { prev->state = TASK_RUNNING; } else { + prev->sched_contributes_to_load = + (prev_state & TASK_UNINTERRUPTIBLE) && + !(prev_state & TASK_NOLOAD) && + !(prev->flags & PF_FROZEN); + + if (prev->sched_contributes_to_load) + rq->nr_uninterruptible++; + + /* + * __schedule() ttwu() + * prev_state = prev->state; if (p->on_rq && ...) + * if (prev_state) goto out; + * p->on_rq = 0; smp_acquire__after_ctrl_dep(); + * p->state = TASK_WAKING + * + * Where __schedule() and ttwu() have matching control dependencies. + * + * After this, schedule() must not care about p->state any more. + */ deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); if (prev->in_iowait) { @@ -4444,6 +4485,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, void *key) { + WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); return try_to_wake_up(curr->private, mode, wake_flags); } EXPORT_SYMBOL(default_wake_function); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 658aa7a2ae6f7d75e11012f437a45edd8befcc71..04fa8dbcfa4d78a33fa557619cd1fd8363b988de 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4039,7 +4039,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) return; } - rq->misfit_task_load = task_h_load(p); + /* + * Make sure that misfit_task_load will not be null even if + * task_h_load() returns 0. 
+ */ + rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); } #else /* CONFIG_SMP */ @@ -7638,7 +7642,14 @@ static int detach_tasks(struct lb_env *env) switch (env->migration_type) { case migrate_load: - load = task_h_load(p); + /* + * Depending of the number of CPUs and tasks and the + * cgroup hierarchy, task_h_load() can return a null + * value. Make sure that env->imbalance decreases + * otherwise detach_tasks() will stop only after + * detaching up to loop_max tasks. + */ + load = max_t(unsigned long, task_h_load(p), 1); if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) diff --git a/kernel/signal.c b/kernel/signal.c index ee22ec78fd6d5c136429f5a2f67ca19d40307ba1..6f16f7c5d3755542aacf4c8bd31962b794199294 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -719,7 +719,7 @@ static int dequeue_synchronous_signal(kernel_siginfo_t *info) * Return the first synchronous signal in the queue. */ list_for_each_entry(q, &pending->list, list) { - /* Synchronous signals have a postive si_code */ + /* Synchronous signals have a positive si_code */ if ((q->info.si_code > SI_USER) && (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { sync = q; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 398e6eadb86171fd8c46f09be0d2efd60fef6556..026ac01af9da7e7619ec365e1efbdd638c0b2695 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -521,8 +522,8 @@ static int calc_wheel_index(unsigned long expires, unsigned long clk) * Force expire obscene large timeouts to expire at the * capacity limit of the wheel. */ - if (expires >= WHEEL_TIMEOUT_CUTOFF) - expires = WHEEL_TIMEOUT_MAX; + if (delta >= WHEEL_TIMEOUT_CUTOFF) + expires = clk + WHEEL_TIMEOUT_MAX; idx = calc_index(expires, LVL_DEPTH - 1); } @@ -584,7 +585,15 @@ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer) * Set the next expiry time and kick the CPU so it can reevaluate the * wheel: */ - base->next_expiry = timer->expires; + if (time_before(timer->expires, base->clk)) { + /* + * Prevent from forward_timer_base() moving the base->clk + * backward + */ + base->next_expiry = base->clk; + } else { + base->next_expiry = timer->expires; + } wake_up_nohz_cpu(base->cpu); } @@ -896,10 +905,13 @@ static inline void forward_timer_base(struct timer_base *base) * If the next expiry value is > jiffies, then we fast forward to * jiffies otherwise we forward to the next expiry value. */ - if (time_after(base->next_expiry, jnow)) + if (time_after(base->next_expiry, jnow)) { base->clk = jnow; - else + } else { + if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk))) + return; base->clk = base->next_expiry; + } #endif } @@ -1731,6 +1743,13 @@ void update_process_times(int user_tick) scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); + + /* The current CPU might make use of net randoms without receiving IRQs + * to renew them often enough. Let's update the net_rand_state from a + * non-constant value that's not affine to the number of calls to make + * sure it's updated when there's some activity (we don't care in idle). 
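The calc_wheel_index() change above is easy to misread: the cutoff has to be applied to the relative delta (expires - clk), not to the absolute expiry, and an over-long timeout must be capped at clk + WHEEL_TIMEOUT_MAX rather than at a fixed absolute value. A standalone restatement of the corrected capping logic, assuming the same WHEEL_TIMEOUT_* constants as timer.c (illustrative only):

static unsigned long demo_cap_expiry(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;

	if (delta >= WHEEL_TIMEOUT_CUTOFF)
		expires = clk + WHEEL_TIMEOUT_MAX;

	return expires;
}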
+ */ + this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); } /** diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index ffa7a76de08604f38ecdd2ee874d8f77cf3ddd7d..256f2486f9bd2f4d0dd0ef603df4955465836417 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -3,6 +3,11 @@ config HAVE_ARCH_KGDB bool +# set if architecture has the its kgdb_arch_handle_qxfer_pkt +# function to enable gdb stub to address XML packet sent from GDB. +config HAVE_ARCH_KGDB_QXFER_PKT + bool + menuconfig KGDB bool "KGDB: kernel debugger" depends on HAVE_ARCH_KGDB diff --git a/lib/packing.c b/lib/packing.c index 50d1e9f2f5a77338dad83d3951c3d5a75947924a..6ed72dccfdb5d667cfa7a4a67d3d727eb5d417ad 100644 --- a/lib/packing.c +++ b/lib/packing.c @@ -73,6 +73,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit, * @endbit: The index (in logical notation, compensated for quirks) where * the packed value ends within pbuf. Must be smaller than, or equal * to, startbit. + * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf. * @op: If PACK, then uval will be treated as const pointer and copied (packed) * into pbuf, between startbit and endbit. * If UNPACK, then pbuf will be treated as const pointer and the logical diff --git a/lib/random32.c b/lib/random32.c index 763b920a6206cd6ffc4cd9668885223f2d6a2790..3d749abb9e80d54d8e330e07fb8b773b7bec2b83 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state); /** * prandom_u32_state - seeded pseudo-random number generator. diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9f6890aedd1ae1644085e7f3c490a400b5f9ee7e..c949c1e3b87c174777534faa64320556096d3bf0 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -31,7 +31,7 @@ union nested_table { union nested_table __rcu *table; - struct rhash_lock_head *bucket; + struct rhash_lock_head __rcu *bucket; }; static u32 head_hashfn(struct rhashtable *ht, @@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, } static int rhashtable_rehash_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); @@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); - struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash); + struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); int err; if (!bkt) @@ -485,7 +485,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht, } static void *rhashtable_lookup_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) { @@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht, return ERR_PTR(-ENOENT); } -static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, - struct rhash_lock_head **bkt, - struct bucket_table *tbl, - unsigned int hash, - struct rhash_head *obj, - void *data) +static struct bucket_table *rhashtable_insert_one( + struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, + struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, + void *data) { struct bucket_table *new_tbl; 
struct rhash_head *head; @@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, { struct bucket_table *new_tbl; struct bucket_table *tbl; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; unsigned int hash; void *data; @@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht) } EXPORT_SYMBOL_GPL(rhashtable_destroy); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); @@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(__rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash) { - static struct rhash_lock_head *rhnull; + static struct rhash_lock_head __rcu *rhnull; if (!rhnull) INIT_RHT_NULLS_HEAD(rhnull); @@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, } EXPORT_SYMBOL_GPL(rht_bucket_nested); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash) +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); diff --git a/mm/filemap.c b/mm/filemap.c index f0ae9a6308cb4d53a420ffa2acfe38e0717f0699..385759c4ce4be6c054c9328773e5b24eba467005 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2028,7 +2028,7 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb, page = find_get_page(mapping, index); if (!page) { - if (iocb->ki_flags & IOCB_NOWAIT) + if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) goto would_block; page_cache_sync_readahead(mapping, ra, filp, @@ -2038,6 +2038,10 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb, goto no_cached_page; } if (PageReadahead(page)) { + if (iocb->ki_flags & IOCB_NOIO) { + put_page(page); + goto out; + } page_cache_async_readahead(mapping, ra, filp, page, index, last_index - index); @@ -2160,6 +2164,11 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb, } readpage: + if (iocb->ki_flags & IOCB_NOIO) { + unlock_page(page); + put_page(page); + goto would_block; + } /* * A previous I/O error may have been due to temporary * failures, eg. multipath errors. @@ -2249,9 +2258,19 @@ EXPORT_SYMBOL_GPL(generic_file_buffered_read); * * This is the "read_iter()" routine for all filesystems * that can use the page cache directly. + * + * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall + * be returned when no data can be read without waiting for I/O requests + * to complete; it doesn't prevent readahead. + * + * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O + * requests shall be made for the read or for readahead. When no data + * can be read, -EAGAIN shall be returned. When readahead would be + * triggered, a partial, possibly empty read shall be returned. 
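The IOCB_NOIO description above amounts to a cache-only read mode. A sketch of how an in-kernel caller might use it (demo_cached_read() is hypothetical, not part of this patch): set the flag on the kiocb and treat -EAGAIN or a short count as "not fully resident, fall back to a normal read".

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t demo_cached_read(struct file *file, struct iov_iter *to,
				loff_t pos)
{
	struct kiocb kiocb;

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos;
	kiocb.ki_flags |= IOCB_NOIO;	/* no readahead, no ->readpage I/O */

	/* -EAGAIN or a short/zero count: data was not (fully) in the page cache */
	return generic_file_read_iter(&kiocb, to);
}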
+ * * Return: * * number of bytes copied, even for partial reads - * * negative error code if nothing was read + * * negative error code (or 0 if IOCB_NOIO) if nothing was read */ ssize_t generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index fab4485b9e52b6dd78ddcdb00906de2d1b815b76..590111ea6975d93cef95cdb65ccd59121ae015c5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -45,7 +45,10 @@ int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; +#ifdef CONFIG_CMA static struct cma *hugetlb_cma[MAX_NUMNODES]; +#endif +static unsigned long hugetlb_cma_size __initdata; /* * Minimum page order among possible hugepage sizes, set to a proper value @@ -1235,9 +1238,10 @@ static void free_gigantic_page(struct page *page, unsigned int order) * If the page isn't allocated using the cma allocator, * cma_release() returns false. */ - if (IS_ENABLED(CONFIG_CMA) && - cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) +#ifdef CONFIG_CMA + if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) return; +#endif free_contig_range(page_to_pfn(page), 1 << order); } @@ -1248,7 +1252,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, { unsigned long nr_pages = 1UL << huge_page_order(h); - if (IS_ENABLED(CONFIG_CMA)) { +#ifdef CONFIG_CMA + { struct page *page; int node; @@ -1262,6 +1267,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, return page; } } +#endif return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask); } @@ -2571,7 +2577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h) for (i = 0; i < h->max_huge_pages; ++i) { if (hstate_is_gigantic(h)) { - if (IS_ENABLED(CONFIG_CMA) && hugetlb_cma[0]) { + if (hugetlb_cma_size) { pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); break; } @@ -5654,7 +5660,6 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) } #ifdef CONFIG_CMA -static unsigned long hugetlb_cma_size __initdata; static bool cma_reserve_called __initdata; static int __init cmdline_parse_hugetlb_cma(char *p) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b043c40a21d434ad7de7a10a779a06208c5ce1ef..700f5160f3e4dcaaed5199923e1aedaa57e9ad08 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -958,6 +958,9 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags)) return SCAN_VMA_CHECK; + /* Anon VMA expected */ + if (!vma->anon_vma || vma->vm_ops) + return SCAN_VMA_CHECK; return 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 19622328e4b5acee9987873e9cf51ade3c6b3311..13f559af1ab6a359b40c8641be754be7fd56079a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5669,7 +5669,6 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - mem_cgroup_id_get_many(mc.to, mc.moved_swap); css_put_many(&mc.to->css, mc.moved_swap); mc.moved_swap = 0; @@ -5860,7 +5859,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, ent = target.ent; if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { mc.precharge--; - /* we fixup refcnts and charges later. */ + mem_cgroup_id_get_many(mc.to, 1); + /* we fixup other refcnts and charges later. 
*/ mc.moved_swap++; } break; @@ -7186,6 +7186,13 @@ static struct cftype memsw_files[] = { { }, /* terminate */ }; +/* + * If mem_cgroup_swap_init() is implemented as a subsys_initcall() + * instead of a core_initcall(), this could mean cgroup_memory_noswap still + * remains set to false even when memcg is disabled via "cgroup_disable=memory" + * boot parameter. This may result in premature OOPS inside + * mem_cgroup_get_nr_swap_pages() function in corner cases. + */ static int __init mem_cgroup_swap_init(void) { /* No memory control -> no swap control */ @@ -7200,6 +7207,6 @@ static int __init mem_cgroup_swap_init(void) return 0; } -subsys_initcall(mem_cgroup_swap_init); +core_initcall(mem_cgroup_swap_init); #endif /* CONFIG_MEMCG_SWAP */ diff --git a/mm/memory.c b/mm/memory.c index 87ec87cdc1ff1a096243fd7c9beec1c431dbd556..3ecad55103adb89bc6ff213e6ce617745a966990 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1601,7 +1601,7 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, return insert_pages(vma, addr, pages, num, vma->vm_page_prot); #else unsigned long idx = 0, pgcount = *num; - int err; + int err = -EINVAL; for (; idx < pgcount; ++idx) { err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); diff --git a/mm/migrate.c b/mm/migrate.c index f3772967355861556660db86d6a5dfb59c890971..40cd7016ae6fc60120a780611d17670556357b7c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1160,22 +1160,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage, return rc; } -/* - * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move(). Work - * around it. - */ -#if defined(CONFIG_ARM) && \ - defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700 -#define ICE_noinline noinline -#else -#define ICE_noinline -#endif - /* * Obtain the lock on page, remove all ptes and migrate the page * to the newly allocated page in newpage. */ -static ICE_noinline int unmap_and_move(new_page_t get_new_page, +static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, unsigned long private, struct page *page, int force, enum migrate_mode mode, diff --git a/mm/mmap.c b/mm/mmap.c index 59a4682ebf3faedb2cd00ec39656de3a40290050..8c7ca737a19b3db6274c9f918c00bfdfe9597dcf 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2620,7 +2620,7 @@ static void unmap_region(struct mm_struct *mm, * Create a list of vma's touched by the unmap, removing them from the mm's * vma list as we go.. */ -static void +static bool detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) { @@ -2645,6 +2645,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, /* Kill the cache */ vmacache_invalidate(mm); + + /* + * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or + * VM_GROWSUP VMA. Such VMAs can change their size under + * down_read(mmap_lock) and collide with the VMA we are about to unmap. 
+ */ + if (vma && (vma->vm_flags & VM_GROWSDOWN)) + return false; + if (prev && (prev->vm_flags & VM_GROWSUP)) + return false; + return true; } /* @@ -2825,7 +2836,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len, } /* Detach vmas from rbtree */ - detach_vmas_to_be_unmapped(mm, vma, prev, end); + if (!detach_vmas_to_be_unmapped(mm, vma, prev, end)) + downgrade = false; if (downgrade) mmap_write_downgrade(mm); diff --git a/mm/mremap.c b/mm/mremap.c index 5dd572d57ca9919b7e9821bf753131fe64f770fc..6b153dc05fe4882c76eaac4150c8c20e12956af3 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -206,9 +206,28 @@ static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, /* * The destination pmd shouldn't be established, free_pgtables() - * should have release it. + * should have released it. + * + * However, there's a case during execve() where we use mremap + * to move the initial stack, and in that case the target area + * may overlap the source area (always moving down). + * + * If everything is PMD-aligned, that works fine, as moving + * each pmd down will clear the source pmd. But if we first + * have a few 4kB-only pages that get moved down, and then + * hit the "now the rest is PMD-aligned, let's do everything + * one pmd at a time", we will still have the old (now empty + * of any 4kB pages, but still there) PMD in the page table + * tree. + * + * Warn on it once - because we really should try to figure + * out how to do this better - but then say "I won't move + * this pmd". + * + * One alternative might be to just unmap the target pmd at + * this point, and verify that it really is empty. We'll see. */ - if (WARN_ON(!pmd_none(*new_pmd))) + if (WARN_ON_ONCE(!pmd_none(*new_pmd))) return false; /* diff --git a/mm/shmem.c b/mm/shmem.c index a0dbe62f8042e751508d7eb0eee392089cb5a787..b2abca3f7f33f2996f84eed7db0d52e7d7ed81f9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3178,7 +3178,7 @@ static int shmem_initxattrs(struct inode *inode, new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, GFP_KERNEL); if (!new_xattr->name) { - kfree(new_xattr); + kvfree(new_xattr); return -ENOMEM; } diff --git a/mm/slab_common.c b/mm/slab_common.c index 37d48a56431d04d279a371a5b01258ba90ef8894..fe8b68482670d8a6f52a38650b6ddc5951e9b1b6 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -326,6 +326,14 @@ int slab_unmergeable(struct kmem_cache *s) if (s->refcount < 0) return 1; +#ifdef CONFIG_MEMCG_KMEM + /* + * Skip the dying kmem_cache. + */ + if (s->memcg_params.dying) + return 1; +#endif + return 0; } @@ -886,12 +894,15 @@ static int shutdown_memcg_caches(struct kmem_cache *s) return 0; } -static void flush_memcg_workqueue(struct kmem_cache *s) +static void memcg_set_kmem_cache_dying(struct kmem_cache *s) { spin_lock_irq(&memcg_kmem_wq_lock); s->memcg_params.dying = true; spin_unlock_irq(&memcg_kmem_wq_lock); +} +static void flush_memcg_workqueue(struct kmem_cache *s) +{ /* * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make * sure all registered rcu callbacks have been invoked. 
@@ -923,10 +934,6 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s) { return 0; } - -static inline void flush_memcg_workqueue(struct kmem_cache *s) -{ -} #endif /* CONFIG_MEMCG_KMEM */ void slab_kmem_cache_release(struct kmem_cache *s) @@ -944,8 +951,6 @@ void kmem_cache_destroy(struct kmem_cache *s) if (unlikely(!s)) return; - flush_memcg_workqueue(s); - get_online_cpus(); get_online_mems(); @@ -955,6 +960,22 @@ void kmem_cache_destroy(struct kmem_cache *s) if (s->refcount) goto out_unlock; +#ifdef CONFIG_MEMCG_KMEM + memcg_set_kmem_cache_dying(s); + + mutex_unlock(&slab_mutex); + + put_online_mems(); + put_online_cpus(); + + flush_memcg_workqueue(s); + + get_online_cpus(); + get_online_mems(); + + mutex_lock(&slab_mutex); +#endif + err = shutdown_memcg_caches(s); if (!err) err = shutdown_cache(s); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index c8d6a07e23c579c5b097a62e229cee0145efd8c3..3dd7c972677be27b276c44942419a33a79b13214 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -503,11 +503,10 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev, lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key); } -static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) +static void vlan_dev_set_lockdep_class(struct net_device *dev) { - lockdep_set_class_and_subclass(&dev->addr_list_lock, - &vlan_netdev_addr_lock_key, - subclass); + lockdep_set_class(&dev->addr_list_lock, + &vlan_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL); } @@ -601,7 +600,7 @@ static int vlan_dev_init(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &vlan_type); - vlan_dev_set_lockdep_class(dev, dev->lower_level); + vlan_dev_set_lockdep_class(dev); vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan->vlan_pcpu_stats) diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 13cd683a658ab67b38aa26bd0195f23113707e29..12ecacf0c55fbd83b76b46d0cc8fd9339c5967bf 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work) if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); + } else if (m->rreq->status == REQ_STATUS_FLSHD) { + /* Ignore replies associated with a cancelled request. */ + p9_debug(P9_DEBUG_TRANS, + "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->client->lock); p9_debug(P9_DEBUG_ERROR, @@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + spin_lock(&client->lock); + /* Ignore cancelled request if message has been received + * before lock. + */ + if (req->status == REQ_STATUS_RCVD) { + spin_unlock(&client->lock); + return 0; + } + /* we haven't received a response for oldreq, * remove it from the list. 
*/ - spin_lock(&client->lock); list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; spin_unlock(&client->lock); p9_req_put(req); @@ -803,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) return -ENOMEM; ts->rd = fget(rfd); + if (!ts->rd) + goto out_free_ts; + if (!(ts->rd->f_mode & FMODE_READ)) + goto out_put_rd; ts->wr = fget(wfd); - if (!ts->rd || !ts->wr) { - if (ts->rd) - fput(ts->rd); - if (ts->wr) - fput(ts->wr); - kfree(ts); - return -EIO; - } + if (!ts->wr) + goto out_put_rd; + if (!(ts->wr->f_mode & FMODE_WRITE)) + goto out_put_wr; client->trans = ts; client->status = Connected; return 0; + +out_put_wr: + fput(ts->wr); +out_put_rd: + fput(ts->rd); +out_free_ts: + kfree(ts); + return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index fd91cd34f25e03d0178cbe66568ed392a7013f4e..dec3f35467c9e195628c26fbb84be2d87d2746a9 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock, if (addr_len > sizeof(struct sockaddr_ax25) && fsa->fsa_ax25.sax25_ndigis != 0) { /* Valid number of digipeaters ? */ - if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) { + if (fsa->fsa_ax25.sax25_ndigis < 1 || + fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) { err = -EINVAL; goto out_release; } @@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax; /* Valid number of digipeaters ? */ - if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) { + if (usax->sax25_ndigis < 1 || + usax->sax25_ndigis > AX25_MAX_DIGIS || + addr_len < sizeof(struct sockaddr_ax25) + + sizeof(ax25_address) * usax->sax25_ndigis) { err = -EINVAL; goto out; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index cfeaee347db32d5502e56e25b8f92d6137613ae4..af9d7f2ff8ba3ff045a7289155b5d8d40fa7dd4d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1338,6 +1338,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; + if (len > HCI_MAX_AD_LENGTH) + return; + bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; @@ -5355,7 +5358,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len) + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5377,6 +5381,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. 
* @@ -5437,7 +5446,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, direct_addr); - if (conn && type == LE_ADV_IND) { + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5491,7 +5500,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. */ - if (!has_pending_adv_report(hdev)) { + if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ @@ -5526,7 +5535,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the new report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -5566,7 +5576,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length); + ev->data, ev->length, false); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -5638,7 +5648,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length); + ev->data, ev->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } ptr += sizeof(*ev) + ev->length; @@ -5836,7 +5847,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0); + ev->direct_addr_type, ev->rssi, NULL, 0, + false); ptr += sizeof(*ev); } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index bfd4ccd80847de61b44462df7b70f46dfb382acd..b03c469cd01fa8f22e821ae00ae764928f8a603d 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -147,6 +147,20 @@ int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f) return a + (long)b + c + d + (long)e + f; } +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg) +{ + return (long)arg; +} + +int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg) +{ + return (long)arg->a; +} + int noinline bpf_modify_return_test(int a, int *b) { *b += 1; @@ -185,6 +199,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { + struct bpf_fentry_test_t arg = {}; u16 side_effect = 0, ret = 0; int b = 2, err = -EFAULT; u32 retval = 0; @@ -197,7 +212,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, bpf_fentry_test3(4, 5, 6) != 15 || bpf_fentry_test4((void *)7, 8, 9, 10) != 34 || bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 || - bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) + bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 || + bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 || + bpf_fentry_test8(&arg) != 0) goto out; break; case BPF_MODIFY_RETURN: diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 
c0f0990f30b60415fa6b929415f5ac67bba4ed01..4494ea6056cdb81e189c4d6cf95209d182a9fc0d 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -39,7 +39,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, { struct mbox_request req; struct mbox_reply reply; - loff_t pos; + loff_t pos = 0; ssize_t n; int ret = -EFAULT; @@ -50,7 +50,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, req.len = optlen; if (!bpfilter_ops.info.pid) goto out; - n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), + n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), &pos); if (n != sizeof(req)) { pr_err("write fail %zd\n", n); diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c index 779e1eb754430fa43099b237f5cba5db00efe746..90592af9db619fcd7b87d153f473caf582888d88 100644 --- a/net/bridge/br_mrp.c +++ b/net/bridge/br_mrp.c @@ -86,7 +86,7 @@ static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p, { struct ethhdr *eth_hdr; struct sk_buff *skb; - u16 *version; + __be16 *version; skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH); if (!skb) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 83490bf73a13b3f2e0f19d83eab6e780ee8b7204..4c4a93abde680db8f27306be6529972bc440911b 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1007,7 +1007,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs); if (skb_transport_offset(skb) + ipv6_transport_len(skb) < - nsrcs_offset + sizeof(_nsrcs)) + nsrcs_offset + sizeof(__nsrcs)) return -EINVAL; _nsrcs = skb_header_pointer(skb, nsrcs_offset, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 2130fe0194e64e03c35e41f24942faf6daa80d10..e0ea6dbbc97ed464f35e2d14a3db5c34b4b1a283 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -430,7 +430,7 @@ struct net_bridge { struct hlist_head fdb_list; #if IS_ENABLED(CONFIG_BRIDGE_MRP) - struct list_head __rcu mrp_list; + struct list_head mrp_list; #endif }; diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h index 33b255e38ffecf563aad75d259ebd2203df1414b..315eb37d89f0f3ad4ae4dcc9b8654b769e18854e 100644 --- a/net/bridge/br_private_mrp.h +++ b/net/bridge/br_private_mrp.h @@ -8,7 +8,7 @@ struct br_mrp { /* list of mrp instances */ - struct list_head __rcu list; + struct list_head list; struct net_bridge_port __rcu *p_port; struct net_bridge_port __rcu *s_port; diff --git a/net/compat.c b/net/compat.c index 5e3041a2c37d4da62a007f77fe226a0a0f81ec66..434838bef5f80d58934d0cf92ee99adbdd50d053 100644 --- a/net/compat.c +++ b/net/compat.c @@ -202,7 +202,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, /* Advance. 
*/ kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp); - ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen); + ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, cmsg.cmsg_len); } /* diff --git a/net/core/dev.c b/net/core/dev.c index 90b59fc50dc9c99891f2510685061880abab17e8..7a774ebf64e26dca2c1247c80898e34bf600570b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5601,7 +5601,7 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); - kfree_skb(skb); + dev_kfree_skb_irq(skb); input_queue_head_incr(sd); } } diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 6393ba930097b49b26a70cf01b657da08514f552..54cd568e7c2f51bdc0da38d267f4276b6d76804e 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -690,6 +690,15 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from) if (to->addr_len != from->addr_len) return; + /* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two + * reasons: + * 1) This is always called without any addr_list_lock, so as the + * outermost one here, it must be 0. + * 2) This is called by some callers after unlinking the upper device, + * so the dev->lower_level becomes 1 again. + * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or + * larger. + */ netif_addr_lock_bh(from); netif_addr_lock_nested(to); __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); @@ -911,6 +920,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from) if (to->addr_len != from->addr_len) return; + /* See the above comments inside dev_uc_unsync(). */ netif_addr_lock_bh(from); netif_addr_lock_nested(to); __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); diff --git a/net/core/devlink.c b/net/core/devlink.c index 2cafbc808b090638977b85cb2ab3d0f0f07ed3f5..47f14a2f25fbcd296c69b72349ad4d5a3aad7014 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -1065,7 +1065,9 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -1266,7 +1268,9 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg, devlink, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -1498,7 +1502,9 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg, devlink_sb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -3299,7 +3305,9 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -3569,7 +3577,9 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI); - if (err && err != -EOPNOTSUPP) { + if (err == -EOPNOTSUPP) { + err = 0; + } else if (err) { mutex_unlock(&devlink->lock); goto out; } @@ -4518,7 +4528,9 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg, cb->nlh->nlmsg_seq, NLM_F_MULTI, 
cb->extack); mutex_unlock(&devlink->lock); - if (err && err != -EOPNOTSUPP) + if (err == -EOPNOTSUPP) + err = 0; + else if (err) break; idx++; } @@ -8567,6 +8579,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = { DEVLINK_TRAP_GROUP(PIM), DEVLINK_TRAP_GROUP(UC_LB), DEVLINK_TRAP_GROUP(LOCAL_DELIVERY), + DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY), DEVLINK_TRAP_GROUP(IPV6), DEVLINK_TRAP_GROUP(PTP_EVENT), DEVLINK_TRAP_GROUP(PTP_GENERAL), diff --git a/net/core/filter.c b/net/core/filter.c index 73395384afe2f88d5b0d7483088b48a1226ac83f..82e1b5b0616758a470fb760635d92d9fe269d342 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5853,12 +5853,16 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) { unsigned int iphdr_len; - if (skb->protocol == cpu_to_be16(ETH_P_IP)) + switch (skb_protocol(skb, true)) { + case cpu_to_be16(ETH_P_IP): iphdr_len = sizeof(struct iphdr); - else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) + break; + case cpu_to_be16(ETH_P_IPV6): iphdr_len = sizeof(struct ipv6hdr); - else + break; + default: return 0; + } if (skb_headlen(skb) < iphdr_len) return 0; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d02df0b6d0d99ae12783f15746fbdff49b603cc7..142a8824f0a8ef348b56b0497484f1e3af391ae3 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -70,10 +70,10 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, EXPORT_SYMBOL(skb_flow_dissector_init); #ifdef CONFIG_BPF_SYSCALL -int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog) +int flow_dissector_bpf_prog_attach_check(struct net *net, + struct bpf_prog *prog) { enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; - struct bpf_prog *attached; if (net == &init_net) { /* BPF flow dissector in the root namespace overrides @@ -86,26 +86,17 @@ int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog) for_each_net(ns) { if (ns == &init_net) continue; - if (rcu_access_pointer(ns->bpf.progs[type])) + if (rcu_access_pointer(ns->bpf.run_array[type])) return -EEXIST; } } else { /* Make sure root flow dissector is not attached * when attaching to the non-root namespace. 
*/ - if (rcu_access_pointer(init_net.bpf.progs[type])) + if (rcu_access_pointer(init_net.bpf.run_array[type])) return -EEXIST; } - attached = rcu_dereference_protected(net->bpf.progs[type], - lockdep_is_held(&netns_bpf_mutex)); - if (attached == prog) - /* The same program cannot be attached twice */ - return -EINVAL; - - rcu_assign_pointer(net->bpf.progs[type], prog); - if (attached) - bpf_prog_put(attached); return 0; } #endif /* CONFIG_BPF_SYSCALL */ @@ -903,7 +894,6 @@ bool __skb_flow_dissect(const struct net *net, struct flow_dissector_key_addrs *key_addrs; struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_vlan *key_vlan; - struct bpf_prog *attached = NULL; enum flow_dissect_ret fdret; enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; bool mpls_el = false; @@ -960,14 +950,14 @@ bool __skb_flow_dissect(const struct net *net, WARN_ON_ONCE(!net); if (net) { enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; + struct bpf_prog_array *run_array; rcu_read_lock(); - attached = rcu_dereference(init_net.bpf.progs[type]); - - if (!attached) - attached = rcu_dereference(net->bpf.progs[type]); + run_array = rcu_dereference(init_net.bpf.run_array[type]); + if (!run_array) + run_array = rcu_dereference(net->bpf.run_array[type]); - if (attached) { + if (run_array) { struct bpf_flow_keys flow_keys; struct bpf_flow_dissector ctx = { .flow_keys = &flow_keys, @@ -975,6 +965,7 @@ bool __skb_flow_dissect(const struct net *net, .data_end = data + hlen, }; __be16 n_proto = proto; + struct bpf_prog *prog; if (skb) { ctx.skb = skb; @@ -985,7 +976,8 @@ bool __skb_flow_dissect(const struct net *net, n_proto = skb->protocol; } - ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff, + prog = READ_ONCE(run_array->items[0].prog); + ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, hlen, flags); __skb_flow_bpf_to_target(&flow_keys, flow_dissector, target_container); diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index b739cfab796e49aec8f3d9d21fce96cc0d58620b..2076219b8ba582ad9533075b10be830d5ff1758e 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -4,6 +4,7 @@ #include #include #include +#include struct flow_rule *flow_rule_alloc(unsigned int num_actions) { diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index e353b822bb1574e177438023e3139dc01d1d93af..7bd6440c63bf5718fffce286f871ecedb1734adf 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1108,7 +1108,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) trans_timeout = queue->trans_timeout; spin_unlock_irq(&queue->_xmit_lock); - return sprintf(buf, "%lu", trans_timeout); + return sprintf(buf, fmt_ulong, trans_timeout); } static unsigned int get_netdev_queue_index(struct netdev_queue *queue) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9aedc15736adfb87758f509d53fdfc0f2d1a0889..85a4b0101f7618f58b209fd48e7b829e2768040b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3343,7 +3343,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, */ if (err < 0) { /* If device is not registered at all, free it now */ - if (dev->reg_state == NETREG_UNINITIALIZED) + if (dev->reg_state == NETREG_UNINITIALIZED || + dev->reg_state == NETREG_UNREGISTERED) free_netdev(dev); goto out; } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 351afbf6bfbac1d7d86af2dc4a98c058dd9531e0..6a32a1fd34f8cd1306a43f1f14bd6e038ff2efe6 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -683,7 +683,7 @@ static struct 
sk_psock *sk_psock_from_strp(struct strparser *strp) return container_of(parser, struct sk_psock, parser); } -static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb) +static void sk_psock_skb_redirect(struct sk_buff *skb) { struct sk_psock *psock_other; struct sock *sk_other; @@ -715,12 +715,11 @@ static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb) } } -static void sk_psock_tls_verdict_apply(struct sk_psock *psock, - struct sk_buff *skb, int verdict) +static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict) { switch (verdict) { case __SK_REDIRECT: - sk_psock_skb_redirect(psock, skb); + sk_psock_skb_redirect(skb); break; case __SK_PASS: case __SK_DROP: @@ -741,8 +740,8 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb) ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); } + sk_psock_tls_verdict_apply(skb, ret); rcu_read_unlock(); - sk_psock_tls_verdict_apply(psock, skb, ret); return ret; } EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read); @@ -770,7 +769,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, } goto out_free; case __SK_REDIRECT: - sk_psock_skb_redirect(psock, skb); + sk_psock_skb_redirect(skb); break; case __SK_DROP: /* fall-through */ @@ -782,11 +781,18 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) { - struct sk_psock *psock = sk_psock_from_strp(strp); + struct sk_psock *psock; struct bpf_prog *prog; int ret = __SK_DROP; + struct sock *sk; rcu_read_lock(); + sk = strp->sk; + psock = sk_psock(sk); + if (unlikely(!psock)) { + kfree_skb(skb); + goto out; + } prog = READ_ONCE(psock->progs.skb_verdict); if (likely(prog)) { skb_orphan(skb); @@ -794,8 +800,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); } - rcu_read_unlock(); sk_psock_verdict_apply(psock, skb, ret); +out: + rcu_read_unlock(); } static int sk_psock_strp_read_done(struct strparser *strp, int err) diff --git a/net/core/sock.c b/net/core/sock.c index d832c650287c375cd9e99e40c09f3ec354487716..2e5b7870e5d35d40de7d4fdc68bfe1bbf9c09de7 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1926,7 +1926,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) /* sk->sk_memcg will be populated at accept() time */ newsk->sk_memcg = NULL; - cgroup_sk_alloc(&newsk->sk_cgrp_data); + cgroup_sk_clone(&newsk->sk_cgrp_data); rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 4059f94e9bb5bf7316227984151052b47e321b47..0971f17e8e5429427d48f60524eaf4c9d8f292f9 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -70,11 +70,49 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog) struct fd f; int ret; + if (attr->attach_flags || attr->replace_bpf_fd) + return -EINVAL; + f = fdget(ufd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - ret = sock_map_prog_update(map, prog, attr->attach_type); + ret = sock_map_prog_update(map, prog, NULL, attr->attach_type); + fdput(f); + return ret; +} + +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) +{ + u32 ufd = attr->target_fd; + struct bpf_prog *prog; + struct bpf_map *map; + struct fd f; + int ret; + + if (attr->attach_flags || attr->replace_bpf_fd) + return 
-EINVAL; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + prog = bpf_prog_get(attr->attach_bpf_fd); + if (IS_ERR(prog)) { + ret = PTR_ERR(prog); + goto put_map; + } + + if (prog->type != ptype) { + ret = -EINVAL; + goto put_prog; + } + + ret = sock_map_prog_update(map, NULL, prog, attr->attach_type); +put_prog: + bpf_prog_put(prog); +put_map: fdput(f); return ret; } @@ -1203,27 +1241,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) } int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - u32 which) + struct bpf_prog *old, u32 which) { struct sk_psock_progs *progs = sock_map_progs(map); + struct bpf_prog **pprog; if (!progs) return -EOPNOTSUPP; switch (which) { case BPF_SK_MSG_VERDICT: - psock_set_prog(&progs->msg_parser, prog); + pprog = &progs->msg_parser; break; case BPF_SK_SKB_STREAM_PARSER: - psock_set_prog(&progs->skb_parser, prog); + pprog = &progs->skb_parser; break; case BPF_SK_SKB_STREAM_VERDICT: - psock_set_prog(&progs->skb_verdict, prog); + pprog = &progs->skb_verdict; break; default: return -EOPNOTSUPP; } + if (old) + return psock_replace_prog(pprog, prog, old); + + psock_set_prog(pprog, prog); return 0; } diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index adcb3aea576d6db403388303230e798917505782..bbdd3c7b6cb5b960e4d107c3f25e7067db938483 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -101,6 +101,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) more_reuse->prog = reuse->prog; more_reuse->reuseport_id = reuse->reuseport_id; more_reuse->bind_inany = reuse->bind_inany; + more_reuse->has_conns = reuse->has_conns; memcpy(more_reuse->socks, reuse->socks, reuse->num_socks * sizeof(struct sock *)); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index f93f8ace6c561912fe7757d9eb0343d16aff2a37..6ada114bbcca24a2f70e9392f85de4a2d7b0353e 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -274,7 +274,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write, ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && !ret) { if (jit_enable < 2 || - (jit_enable == 2 && bpf_dump_raw_ok())) { + (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) { *(int *)table->data = jit_enable; if (jit_enable == 2) pr_warn("bpf_jit_enable = 2 was set! 
NEVER use this in production, only for JIT debugging!\n"); diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 88fd07f47040ca57ca4a215b0d30d830a70c9ecc..dd8a1c1dc07ddbd120f293aa4e36feef060e9a18 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -376,10 +376,17 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) } static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, - const struct ethnl_dump_ctx *ctx) + const struct ethnl_dump_ctx *ctx, + struct netlink_callback *cb) { + void *ehdr; int ret; + ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &ethtool_genl_family, 0, ctx->ops->reply_cmd); + if (!ehdr) + return -EMSGSIZE; + ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev); rtnl_lock(); ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL); @@ -395,6 +402,10 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, if (ctx->ops->cleanup_data) ctx->ops->cleanup_data(ctx->reply_data); ctx->reply_data->dev = NULL; + if (ret < 0) + genlmsg_cancel(skb, ehdr); + else + genlmsg_end(skb, ehdr); return ret; } @@ -411,7 +422,6 @@ static int ethnl_default_dumpit(struct sk_buff *skb, int s_idx = ctx->pos_idx; int h, idx = 0; int ret = 0; - void *ehdr; rtnl_lock(); for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { @@ -431,26 +441,15 @@ static int ethnl_default_dumpit(struct sk_buff *skb, dev_hold(dev); rtnl_unlock(); - ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - &ethtool_genl_family, 0, - ctx->ops->reply_cmd); - if (!ehdr) { - dev_put(dev); - ret = -EMSGSIZE; - goto out; - } - ret = ethnl_default_dump_one(skb, dev, ctx); + ret = ethnl_default_dump_one(skb, dev, ctx, cb); dev_put(dev); if (ret < 0) { - genlmsg_cancel(skb, ehdr); if (ret == -EOPNOTSUPP) goto lock_and_cont; if (likely(skb->len)) ret = skb->len; goto out; } - genlmsg_end(skb, ehdr); lock_and_cont: rtnl_lock(); if (net->dev_base_seq != seq) { diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 478852ef98efb8ededf021d90fce8d6785f55eaa..a6f4e9f65b1486aa04f4bdb57f59acc5e7c83c8c 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -415,6 +415,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], unsigned char multicast_spec, u8 protocol_version, struct netlink_ext_ack *extack) { + bool unregister = false; struct hsr_priv *hsr; int res; @@ -466,25 +467,27 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], if (res) goto err_unregister; + unregister = true; + res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack); if (res) - goto err_add_slaves; + goto err_unregister; res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack); if (res) - goto err_add_slaves; + goto err_unregister; hsr_debugfs_init(hsr, hsr_dev); mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD)); return 0; -err_add_slaves: - unregister_netdevice(hsr_dev); err_unregister: hsr_del_ports(hsr); err_add_master: hsr_del_self_node(hsr); + if (unregister) + unregister_netdevice(hsr_dev); return res; } diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ed13760463decffabaad9144f1117dc6b7cb46c2..1ea17752fffc9b6d502b4997bcf97bb7d20fbab0 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -120,13 +120,18 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame, return skb_clone(frame->skb_std, GFP_ATOMIC); } -static void hsr_fill_tag(struct sk_buff *skb, struct 
hsr_frame_info *frame, - struct hsr_port *port, u8 proto_version) +static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, + struct hsr_frame_info *frame, + struct hsr_port *port, u8 proto_version) { struct hsr_ethhdr *hsr_ethhdr; int lane_id; int lsdu_size; + /* pad to minimum packet size which is 60 + 6 (HSR tag) */ + if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) + return NULL; + if (port->type == HSR_PT_SLAVE_A) lane_id = 0; else @@ -144,6 +149,8 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; hsr_ethhdr->ethhdr.h_proto = htons(proto_version ? ETH_P_HSR : ETH_P_PRP); + + return skb; } static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, @@ -172,9 +179,10 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, memmove(dst, src, movelen); skb_reset_mac_header(skb); - hsr_fill_tag(skb, frame, port, port->hsr->prot_version); - - return skb; + /* skb_put_padto free skb on error and hsr_fill_tag returns NULL in + * that case + */ + return hsr_fill_tag(skb, frame, port, port->hsr->prot_version); } /* If the original frame was an HSR tagged frame, just clone it to be sent diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 03b89190431443b29c6fe290de6fc74a5064c468..530de24b1fb5739021088f786eda0a2c5c37e45f 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -325,7 +325,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, if (port->type != node_dst->addr_B_port) return; - ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B); + if (is_valid_ether_addr(node_dst->macaddress_B)) + ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B); } void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 248f1c1959a63c4aa22331d2a937aa5aecaf3f0e..3c65f71d0e820c04112d9d87f792315f7c0f1f72 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1864,7 +1864,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; - hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 956a806649f7ef8df08c259acc4dbacc3d0e3ca3..e30515f898023abf821394c6374dd27fadeec9b1 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) ipcm_init(&ipc); inet->tos = ip_hdr(skb)->tos; - sk->sk_mark = mark; + ipc.sockc.mark = mark; daddr = ipc.addr = ip_hdr(skb)->saddr; saddr = fib_compute_spec_dst(skb); @@ -710,10 +710,10 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; - sk->sk_mark = mark; ipcm_init(&ipc); ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; + ipc.sockc.mark = mark; rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, type, code, &icmp_param); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 090d3097ee15baa87695c530278761fb26534ad5..17206677d5033d15db6249801169e8191d61bcd6 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1702,7 +1702,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, sk->sk_protocol = ip_hdr(skb)->protocol; 
sk->sk_bound_dev_if = arg->bound_dev_if; sk->sk_sndbuf = sysctl_wmem_default; - sk->sk_mark = fl4.flowi4_mark; + ipc.sockc.mark = fl4.flowi4_mark; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); if (unlikely(err)) { diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 181b7a2a024766a7370dbb657acac85ed1f49016..f8b419e2475c9f7ec55a2fb0e4c0906c1f5f7102 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -844,3 +844,21 @@ void ip_tunnel_unneed_metadata(void) static_branch_dec(&ip_tunnel_metadata_cnt); } EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata); + +/* Returns either the correct skb->protocol value, or 0 if invalid. */ +__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) +{ + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && + ip_hdr(skb)->version == 4) + return htons(ETH_P_IP); + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && + ipv6_hdr(skb)->version == 6) + return htons(ETH_P_IPV6); + return 0; +} +EXPORT_SYMBOL(ip_tunnel_parse_protocol); + +const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; +EXPORT_SYMBOL(ip_tunnel_header_ops); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 1d9c8cff5ac332f3790540b4f02ef3d0157842ab..460ca1099e8acf82b639d4ec02393a93ec7ccd71 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -441,6 +441,7 @@ static const struct net_device_ops vti_netdev_ops = { static void vti_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &vti_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->type = ARPHRD_TUNNEL; ip_tunnel_setup(dev, vti_net_id); } diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 40fea52c82773fcaccb79ac94a155e4d7d30f8d5..75d35e76bec2c4b185c821156d84f31c2b7b0f7a 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -361,6 +361,7 @@ static const struct net_device_ops ipip_netdev_ops = { static void ipip_tunnel_setup(struct net_device *dev) { dev->netdev_ops = &ipip_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->type = ARPHRD_TUNNEL; dev->flags = IFF_NOARP; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 535427292194eb57e8a268f882174710ccb76b30..df6fbefe44d4b4bc9507c5ac124da9ce4adaf39f 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -786,6 +786,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, sk->sk_uid); + fl4.fl4_icmp_type = user_icmph.type; + fl4.fl4_icmp_code = user_icmph.code; + security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 1d7076b78e630b7621cd7b87941edda9ebf1cc7c..a01efa062f6bcd16ef3e1f917a0a1397ac7a3507 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2027,7 +2027,7 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, const struct sk_buff *hint) { struct in_device *in_dev = __in_dev_get_rcu(dev); - struct rtable *rt = (struct rtable *)hint; + struct rtable *rt = skb_rtable(hint); struct net *net = dev_net(dev); int err = -EINVAL; u32 tag = 0; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 810cc164f795f8e1e8ca747ed5df51bb20fec8a2..6f0caf9a866de912d1903a95bb63abe3f70f7578 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2691,6 +2691,9 @@ int tcp_disconnect(struct sock *sk, int 
flags) tp->window_clamp = 0; tp->delivered = 0; tp->delivered_ce = 0; + if (icsk->icsk_ca_ops->release) + icsk->icsk_ca_ops->release(sk); + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); tcp_set_ca_state(sk, TCP_CA_Open); tp->is_sack_reneg = 0; tcp_clear_retrans(tp); @@ -3246,10 +3249,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); - else - err = -EINVAL; + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); break; #endif case TCP_USER_TIMEOUT: @@ -4033,11 +4033,14 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data); int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) { + u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ struct scatterlist sg; - sg_init_one(&sg, key->key, key->keylen); - ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen); - return crypto_ahash_update(hp->md5_req); + sg_init_one(&sg, key->key, keylen); + ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); + + /* We use data_race() because tcp_md5_do_add() might change key->key under us */ + return data_race(crypto_ahash_update(hp->md5_req)); } EXPORT_SYMBOL(tcp_md5_hash_key); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 3172e31987be4232af90e7b204742c5bb09ef6ca..62878cf26d9cc5c0ae44d5ecdadd0b7a5acf5365 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -197,7 +197,7 @@ static void tcp_reinit_congestion_control(struct sock *sk, icsk->icsk_ca_setsockopt = 1; memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); - if (sk->sk_state != TCP_CLOSE) + if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) tcp_init_congestion_control(sk); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f3a0eb139b7633ebc1ddb801de232bcd3a0cbdc6..518f04355fbf33463087153e85f8c5b734ccae15 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3488,10 +3488,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) } } -/* This routine deals with acks during a TLP episode. - * We mark the end of a TLP episode on receiving TLP dupack or when - * ack is after tlp_high_seq. - * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. +/* This routine deals with acks during a TLP episode and ends an episode by + * resetting tlp_high_seq. 
Ref: TLP algorithm in draft-ietf-tcpm-rack */ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) { @@ -3500,7 +3498,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) if (before(ack, tp->tlp_high_seq)) return; - if (flag & FLAG_DSACKING_ACK) { + if (!tp->tlp_retrans) { + /* TLP of new data has been acknowledged */ + tp->tlp_high_seq = 0; + } else if (flag & FLAG_DSACKING_ACK) { /* This DSACK means original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; } else if (after(ack, tp->tlp_high_seq)) { @@ -4582,6 +4583,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); + sk->sk_data_ready(sk); tcp_drop(sk, skb); return; } @@ -4828,6 +4830,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) sk_forced_mem_schedule(sk, skb->truesize); else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); + sk->sk_data_ready(sk); goto drop; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ad6435ba6d72ffd8caf783bb25cad7ec151d6909..04bfcbbfee83aadf5bca0332275c57113abdbc75 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1111,9 +1111,21 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index); if (key) { - /* Pre-existing entry - just update that one. */ - memcpy(key->key, newkey, newkeylen); - key->keylen = newkeylen; + /* Pre-existing entry - just update that one. + * Note that the key might be used concurrently. + * data_race() is telling kcsan that we do not care of + * key mismatches, since changing MD5 key on live flows + * can lead to packet drops. + */ + data_race(memcpy(key->key, newkey, newkeylen)); + + /* Pairs with READ_ONCE() in tcp_md5_hash_key(). + * Also note that a reader could catch new key->keylen value + * but old key->key[], this is the reason we use __GFP_ZERO + * at sock_kmalloc() time below these lines. + */ + WRITE_ONCE(key->keylen, newkeylen); + return 0; } @@ -1129,7 +1141,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, rcu_assign_pointer(tp->md5sig_info, md5sig); } - key = sock_kmalloc(sk, sizeof(*key), gfp); + key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO); if (!key) return -ENOMEM; if (!tcp_alloc_md5sig_pool()) { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a50e1990a845a258d4cc6a2a989d09068ea3a973..0bc05d68cd74731bde705b3b8fab2e73ccb03fae 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -700,7 +700,8 @@ static unsigned int tcp_synack_options(const struct sock *sk, unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, const struct tcp_md5sig_key *md5, - struct tcp_fastopen_cookie *foc) + struct tcp_fastopen_cookie *foc, + enum tcp_synack_type synack_type) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; @@ -715,7 +716,8 @@ static unsigned int tcp_synack_options(const struct sock *sk, * rather than TS in order to fit in better with old, * buggy kernels, but that was deemed to be unnecessary. 
*/ - ireq->tstamp_ok &= !ireq->sack_ok; + if (synack_type != TCP_SYNACK_COOKIE) + ireq->tstamp_ok &= !ireq->sack_ok; } #endif @@ -2622,6 +2624,11 @@ void tcp_send_loss_probe(struct sock *sk) int pcount; int mss = tcp_current_mss(sk); + /* At most one outstanding TLP */ + if (tp->tlp_high_seq) + goto rearm_timer; + + tp->tlp_retrans = 0; skb = tcp_send_head(sk); if (skb && tcp_snd_wnd_test(tp, skb, mss)) { pcount = tp->packets_out; @@ -2639,10 +2646,6 @@ void tcp_send_loss_probe(struct sock *sk) return; } - /* At most one outstanding TLP retransmission. */ - if (tp->tlp_high_seq) - goto rearm_timer; - if (skb_still_in_host_queue(sk, skb)) goto rearm_timer; @@ -2664,10 +2667,12 @@ void tcp_send_loss_probe(struct sock *sk) if (__tcp_retransmit_skb(sk, skb, 1)) goto rearm_timer; + tp->tlp_retrans = 1; + +probe_sent: /* Record snd_nxt for loss detection. */ tp->tlp_high_seq = tp->snd_nxt; -probe_sent: NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); /* Reset s.t. tcp_rearm_rto will restart timer from now */ inet_csk(sk)->icsk_pending = 0; @@ -3394,7 +3399,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, #endif skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5, - foc) + sizeof(*th); + foc, synack_type) + sizeof(*th); skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1b7ebbcae4971c304c6eafd441bd708e37805de2..4077d589b72efe4849dc83ce786558f5cbef3816 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -416,7 +416,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, struct udp_hslot *hslot2, struct sk_buff *skb) { - struct sock *sk, *result; + struct sock *sk, *result, *reuseport_result; int score, badness; u32 hash = 0; @@ -426,17 +426,20 @@ static struct sock *udp4_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { + reuseport_result = NULL; + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; + reuseport_result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (reuseport_result && !reuseport_has_conns(sk, false)) + return reuseport_result; } + + result = reuseport_result ? 
: sk; badness = score; - result = sk; } } return result; @@ -2051,7 +2054,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets */ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 893261230ffc0a0a3e0e207db4ba9c7c8b3ef1f2..dacdea7fcb628d34b684da234d130d0e95f3e90a 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } -void ipv6_sock_ac_close(struct sock *sk) +void __ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; @@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk) struct net *net = sock_net(sk); int prev_index; - if (!np->ipv6_ac_list) - return; - - rtnl_lock(); + ASSERT_RTNL(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; @@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } +} + +void ipv6_sock_ac_close(struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + if (!np->ipv6_ac_list) + return; + rtnl_lock(); + __ipv6_sock_ac_close(sk); rtnl_unlock(); } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index c4359277112631ab6169df14e8bedb7e45373b04..52c2f063529fbf9e33326eb6d3887f8c810b26eb 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -805,10 +805,17 @@ int esp6_input_done2(struct sk_buff *skb, int err) if (x->encap) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); + int offset = skb_network_offset(skb) + sizeof(*ip6h); struct xfrm_encap_tmpl *encap = x->encap; - struct udphdr *uh = (void *)(skb_network_header(skb) + hdr_len); - struct tcphdr *th = (void *)(skb_network_header(skb) + hdr_len); - __be16 source; + u8 nexthdr = ip6h->nexthdr; + __be16 frag_off, source; + struct udphdr *uh; + struct tcphdr *th; + + offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); + uh = (void *)(skb->data + offset); + th = (void *)(skb->data + offset); + hdr_len += offset; switch (x->encap->encap_type) { case TCP_ENCAP_ESPINTCP: diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index fc5000370030d67094ba11f15aaaaaa7ba519cde..9df8737ae0d3294319c5d91651b2b9317b0fba08 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -566,7 +566,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL); security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); - sk->sk_mark = mark; np = inet6_sk(sk); if (!icmpv6_xrlim_allow(sk, type, &fl6)) @@ -583,6 +582,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, fl6.flowi6_oif = np->ucast_oif; ipcm6_init_sk(&ipc6, np); + ipc6.sockc.mark = mark; fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = icmpv6_route_lookup(net, skb, sk, &fl6); @@ -751,7 +751,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb) sk = icmpv6_xmit_lock(net); if (!sk) goto out_bh_enable; - sk->sk_mark = mark; np = inet6_sk(sk); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) @@ -779,6 +778,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) ipcm6_init_sk(&ipc6, np); ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb)); + ipc6.sockc.mark = mark; if (ip6_append_data(sk, icmpv6_getfrag, &msg, 
skb->len + sizeof(struct icmp6hdr), diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 6532bde82b40a1abdaae492c75015747652e0a2a..3a57fb9ce0494beda6500689c4ce1086014fe210 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1562,17 +1562,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) static int __net_init ip6gre_init_net(struct net *net) { struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); + struct net_device *ndev; int err; if (!net_has_fallback_tunnels(net)) return 0; - ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", - NET_NAME_UNKNOWN, - ip6gre_tunnel_setup); - if (!ign->fb_tunnel_dev) { + ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0", + NET_NAME_UNKNOWN, ip6gre_tunnel_setup); + if (!ndev) { err = -ENOMEM; goto err_alloc_dev; } + ign->fb_tunnel_dev = ndev; dev_net_set(ign->fb_tunnel_dev, net); /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. @@ -1592,7 +1593,7 @@ static int __net_init ip6gre_init_net(struct net *net) return 0; err_reg_dev: - free_netdev(ign->fb_tunnel_dev); + free_netdev(ndev); err_alloc_dev: return err; } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 821d96c720b936ae732c388c02977eef213291bb..a18c378ca5f46a1648c86b4d4d7ff9da6e4422bb 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1846,6 +1846,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = { static void ip6_tnl_dev_setup(struct net_device *dev) { dev->netdev_ops = &ip6_tnl_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->needs_free_netdev = true; dev->priv_destructor = ip6_dev_free; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 1147f647b9a0f109f6178f7d11ce65e93ef8120c..0d964160a9dd559b6c2624ae129aaf899c9292ea 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -905,6 +905,7 @@ static const struct net_device_ops vti6_netdev_ops = { static void vti6_dev_setup(struct net_device *dev) { dev->netdev_ops = &vti6_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->needs_free_netdev = true; dev->priv_destructor = vti6_dev_free; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 20576e87a5f7e87b99d6fbd5370771738b3a52e0..76f9e41859a240287cf70dbce29932b6f6b6c325 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -240,6 +240,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); + __ipv6_sock_ac_close(sk); /* * Sock is moving from IPv6 to IPv4 (sk_prot), so diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 82cbb46a2a4fe48c328e5c5522d00bb02019335d..4c36bd0c7930fbb10190c60b1415430520cad56b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -431,9 +431,12 @@ void fib6_select_path(const struct net *net, struct fib6_result *res, struct fib6_info *sibling, *next_sibling; struct fib6_info *match = res->f6i; - if ((!match->fib6_nsiblings && !match->nh) || have_oif_match) + if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) goto out; + if (match->nh && have_oif_match && res->nh) + return; + /* We might have already computed the hash for ICMPv6 errors. In such * case it will always be non-zero. Otherwise now is the time to do it. 
*/ @@ -3402,7 +3405,7 @@ static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type) if ((flags & RTF_REJECT) || (dev && (dev->flags & IFF_LOOPBACK) && !(addr_type & IPV6_ADDR_LOOPBACK) && - !(flags & RTF_LOCAL))) + !(flags & (RTF_ANYCAST | RTF_LOCAL)))) return true; return false; @@ -3682,14 +3685,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, rt->fib6_src.plen = cfg->fc_src_len; #endif if (nh) { - if (!nexthop_get(nh)) { - NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); - goto out; - } if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); goto out; } + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + goto out; + } rt->nh = nh; fib6_nh = nexthop_fib6_nh(rt->nh); } else { diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1fbb4dfbb191bb82b7a8b32c11e74824533b08b2..5e2c34c0ac973643f2fdffee0e9c2d1214922425 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1421,6 +1421,7 @@ static void ipip6_tunnel_setup(struct net_device *dev) int t_hlen = tunnel->hlen + sizeof(struct iphdr); dev->netdev_ops = &ipip6_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->needs_free_netdev = true; dev->priv_destructor = ipip6_dev_free; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7d4151747340f79bb84bbfa4834080abe7d74a8b..a8d74f44056a681ef9057c4c4abb34016120b44f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -148,7 +148,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, int dif, int sdif, struct udp_hslot *hslot2, struct sk_buff *skb) { - struct sock *sk, *result; + struct sock *sk, *result, *reuseport_result; int score, badness; u32 hash = 0; @@ -158,17 +158,20 @@ static struct sock *udp6_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { + reuseport_result = NULL; + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; + reuseport_result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (reuseport_result && !reuseport_has_conns(sk, false)) + return reuseport_result; } - result = sk; + + result = reuseport_result ? : sk; badness = score; } } @@ -643,7 +646,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb) /* * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 
*/ - if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { + if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", diff --git a/net/key/af_key.c b/net/key/af_key.c index b67ed3a8486c259b33a79bf4758aa1a16e972705..a915bc86620afe7a74c68ff684324a64e415c0d2 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + if ((xfilter->sadb_x_filter_splen >= + (sizeof(xfrm_address_t) << 3)) || + (xfilter->sadb_x_filter_dplen >= + (sizeof(xfrm_address_t) << 3))) { + mutex_unlock(&pfk->dump_lock); + return -EINVAL; + } filter = kmalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) { mutex_unlock(&pfk->dump_lock); @@ -2400,7 +2407,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa return err; } - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 1, &err); security_xfrm_policy_free(pol_ctx); @@ -2651,7 +2658,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ return -EINVAL; delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2); - xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN, + xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN, dir, pol->sadb_x_policy_id, delete, &err); if (xp == NULL) return -ENOENT; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 6d7ef78c88af059a4cbfb5d89f32ad6d1babfe74..6434d17e6e8eaccb1fad97615db78b38a765aaaa 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1028,6 +1028,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, /* Queue the packet to IP for output */ skb->ignore_df = 1; + skb_dst_drop(skb); #if IS_ENABLED(CONFIG_IPV6) if (l2tp_sk_is_v6(tunnel->sock)) error = inet6_csk_xmit(tunnel->sock, skb, NULL); @@ -1099,10 +1100,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len goto out_unlock; } - /* Get routing info from the tunnel socket */ - skb_dst_drop(skb); - skb_dst_set(skb, sk_dst_check(sk, 0)); - inet = inet_sk(sk); fl = &inet->cork.fl; switch (tunnel->encap) { diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 54fb8d452a7b7398d8dab417664d6bb613f03723..6e53e43c19071cdb7f9b1ae35e227c24663366ff 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) if (!sock_flag(sk, SOCK_ZAPPED)) goto out; + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (addr->sllc_arphrd != ARPHRD_ETHER) + goto out; rc = -ENODEV; if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); @@ -328,7 +332,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; - if (unlikely(addr->sllc_family != AF_LLC)) + if (!addr->sllc_arphrd) + addr->sllc_arphrd = ARPHRD_ETHER; + if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER)) goto out; dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); rc = -ENODEV; @@ -336,8 +342,6 
@@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) if (sk->sk_bound_dev_if) { llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); if (llc->dev) { - if (!addr->sllc_arphrd) - addr->sllc_arphrd = llc->dev->type; if (is_zero_ether_addr(addr->sllc_mac)) memcpy(addr->sllc_mac, llc->dev->dev_addr, IFHWADDRLEN); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9b360544ad6ff6c537677a27278e38ffd41ef09c..1079a07e43e499192cf7cddbe3e52261c7542e47 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2166,6 +2166,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); + kfree(sdata->u.mesh.ie); mutex_unlock(&sdata->local->mtx); return 0; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 5f1ca25b6c97e0f4b3e199c7b7473daacffb24de..e88beb3ff6db6806077ca774be9b143e6b59f8e9 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -617,6 +617,19 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata, int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { + struct ieee80211_supported_band *sband; + const struct ieee80211_sband_iftype_data *iftd; + + sband = ieee80211_get_sband(sdata); + if (!sband) + return -EINVAL; + + iftd = ieee80211_get_sband_iftype_data(sband, + NL80211_IFTYPE_MESH_POINT); + /* The device doesn't support HE in mesh mode or at all */ + if (!iftd) + return 0; + ieee80211_ie_build_he_6ghz_cap(sdata, skb); return 0; } diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index aa5150929996d609ccf8075465e12c70dbd9fc18..02cde0fd08fe8616d8489ce7c174b15bdd88b214 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1105,11 +1105,8 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) ttl, lifetime, 0, ifmsh->preq_id++, sdata); spin_lock_bh(&mpath->state_lock); - if (mpath->flags & MESH_PATH_DELETED) { - spin_unlock_bh(&mpath->state_lock); - goto enddiscovery; - } - mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); + if (!(mpath->flags & MESH_PATH_DELETED)) + mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); spin_unlock_bh(&mpath->state_lock); enddiscovery: diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 117519bf33d65be4d74f9c28ce7abab1619dbb3d..aca608ae313fe31d35a53f968065ec3a4602065d 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); kfree_rcu(mpath, rcu); } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index a88ab6fb16f20d738aaa74dcb21304bf2e08a4af..5c5af4b5fc080211f395cf8816937ee360b6ff9b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2396,6 +2396,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) { + struct ieee80211_hdr *hdr = (void *)rx->skb->data; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); @@ -2406,6 +2407,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) if (status->flag & RX_FLAG_DECRYPTED) return 0; + /* check mesh EAPOL frames first */ + if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) && + 
ieee80211_is_data(fc))) { + struct ieee80211s_hdr *mesh_hdr; + u16 hdr_len = ieee80211_hdrlen(fc); + u16 ethertype_offset; + __be16 ethertype; + + if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr)) + goto drop_check; + + /* make sure fixed part of mesh header is there, also checks skb len */ + if (!pskb_may_pull(rx->skb, hdr_len + 6)) + goto drop_check; + + mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len); + ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) + + sizeof(rfc1042_header); + + if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 && + ethertype == rx->sdata->control_port_protocol) + return 0; + } + +drop_check: /* Drop unencrypted frames if key is set. */ if (unlikely(!ieee80211_has_protected(fc) && !ieee80211_is_any_nullfunc(fc) && diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index cd8487bc6fc2e368f01e41275518a4056c1cc552..af4cc5fb678edf98b2f3ea8cb46c5d76ee423883 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1923,9 +1923,7 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, if (sta) { tx_pending = atomic_sub_return(tx_airtime, &sta->airtime[ac].aql_tx_pending); - if (WARN_ONCE(tx_pending < 0, - "STA %pM AC %d txq pending airtime underflow: %u, %u", - sta->addr, ac, tx_pending, tx_airtime)) + if (tx_pending < 0) atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending, tx_pending, 0); } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 7b1bacac39c6ee3a43d1a7755421d0c8f023d13a..cbc40b358ba264c6d8d68b434f4343d9d96bdd12 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -639,11 +639,23 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie; struct ieee80211_sub_if_data *sdata; struct ieee80211_hdr *hdr = (void *)skb->data; + __be16 ethertype = 0; + + if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3)) + skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN); rcu_read_lock(); sdata = ieee80211_sdata_from_skb(local, skb); if (sdata) { - if (ieee80211_is_any_nullfunc(hdr->frame_control)) + if (ethertype == sdata->control_port_protocol || + ethertype == cpu_to_be16(ETH_P_PREAUTH)) + cfg80211_control_port_tx_status(&sdata->wdev, + cookie, + skb->data, + skb->len, + acked, + GFP_ATOMIC); + else if (ieee80211_is_any_nullfunc(hdr->frame_control)) cfg80211_probe_status(sdata->dev, hdr->addr1, cookie, acked, info->status.ack_signal, @@ -654,12 +666,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local, skb->data, skb->len, acked, GFP_ATOMIC); else - cfg80211_control_port_tx_status(&sdata->wdev, - cookie, - skb->data, - skb->len, - acked, - GFP_ATOMIC); + pr_warn("Unknown status report in ack skb\n"); + } rcu_read_unlock(); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e9ce658141f51a2e6cc813dd03997e154a1f07a2..3529d1368068232e238b59789ca4f881e1fd2a2b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3996,6 +3996,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, skb_list_walk_safe(skb, skb, next) { skb_mark_not_on_list(skb); + if (skb->protocol == sdata->control_port_protocol) + ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; + skb = ieee80211_build_hdr(sdata, skb, info_flags, sta, ctrl_flags, cookie); if (IS_ERR(skb)) { @@ -4206,7 +4209,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER))) ra = sdata->u.mgd.bssid; - if (!is_valid_ether_addr(ra)) + if 
(is_zero_ether_addr(ra)) goto out_free; multicast = is_multicast_ether_addr(ra); @@ -4227,11 +4230,12 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata, test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) goto out_free; + memset(info, 0, sizeof(*info)); + if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) - ieee80211_store_ack_skb(local, skb, &info->flags, NULL); - - memset(info, 0, sizeof(*info)); + info->ack_frame_id = ieee80211_store_ack_skb(local, skb, + &info->flags, NULL); if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) { if (sdata->control_port_no_encrypt) @@ -5371,7 +5375,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev, return -EINVAL; if (proto == sdata->control_port_protocol) - ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO | + IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP; if (unencrypted) flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 21c94094a699df549511b37f58e4da5101561fba..dd9f5c7a1ade63cf30348f210e654b87ed6ea8ac 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2878,6 +2878,10 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!iftd)) return; + /* Check for device HE 6 GHz capability before adding element */ + if (!iftd->he_6ghz_capa.capa) + return; + cap = le16_to_cpu(iftd->he_6ghz_capa.capa); cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS; diff --git a/net/mptcp/options.c b/net/mptcp/options.c index df9a51425c6fc4769c06fcc0f09dbfa7ef554683..8f940be42f98a567b84a26d398200d3627d5efbc 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -449,9 +449,9 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, } static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, - struct mptcp_ext *ext) + struct sk_buff *skb, struct mptcp_ext *ext) { - if (!ext->use_map) { + if (!ext->use_map || !skb->len) { /* RFC6824 requires a DSS mapping with specific values * if DATA_FIN is set but no data payload is mapped */ @@ -503,7 +503,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, opts->ext_copy = *mpext; if (skb && tcp_fin && subflow->data_fin_tx_enable) - mptcp_write_data_fin(subflow, &opts->ext_copy); + mptcp_write_data_fin(subflow, skb, &opts->ext_copy); ret = true; } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 3980fbb6f31ea154e12ef41f896916892eb015b2..c0abe738e7d31230e884e2154c1f28d49785faf4 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1833,7 +1833,7 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, /* on successful connect, the msk state will be moved to established by * subflow_finish_connect() */ - if (!err || err == EINPROGRESS) + if (!err || err == -EINPROGRESS) mptcp_copy_inaddrs(sock->sk, ssock->sk); else inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index 486959f70cf313b44b06fdaffef6827fbe04eaba..a8ce04a4bb72abef52524edad94dc7d69d39458a 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -326,7 +326,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], set->variant = &bitmap_ip; if (!init_map_ip(set, map, first_ip, last_ip, elements, hosts, netmask)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if 
(tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 2310a316e0affc69676062f7d984126e6a0b8e83..2c625e0f49ec020581206445b3365663d9c19746 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_ipmac; if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index e56ced66f202d6d8b6e913f0349a9991583d4014..7138e080def4cfd7abf40a9ca485212773dda8ec 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -274,7 +274,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); set->variant = &bitmap_port; if (!init_map_port(set, map, first_port, last_port)) { - kfree(map); + ip_set_free(map); return -ENOMEM; } if (tb[IPSET_ATTR_TIMEOUT]) { diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 1ee43752d6d3ccb1eae1684369a8be5ea5b96656..521e970be4028de7659b437616f844744362f10b 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -682,7 +682,7 @@ mtype_resize(struct ip_set *set, bool retried) } t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits)); if (!t->hregion) { - kfree(t); + ip_set_free(t); ret = -ENOMEM; goto out; } @@ -1533,7 +1533,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, } t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits)); if (!t->hregion) { - kfree(t); + ip_set_free(t); kfree(h); return -ENOMEM; } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 605e0f68f8bd30a44b686bbbb6d625be79c23247..2b8abbfe018cf4758541ac1439bf91f6090f750b 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = tinfo->ipvs; + struct sock *sk = tinfo->sock->sk; + struct udp_sock *up = udp_sk(sk); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " @@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data) ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id); while (!kthread_should_stop()) { - wait_event_interruptible(*sk_sleep(tinfo->sock->sk), - !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) - || kthread_should_stop()); + wait_event_interruptible(*sk_sleep(sk), + !skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue) || + kthread_should_stop()); /* do we have data now? 
*/ - while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { + while (!skb_queue_empty_lockless(&sk->sk_receive_queue) || + !skb_queue_empty_lockless(&up->reader_queue)) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->bcfg.sync_maxlen); if (len <= 0) { diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 79cd9dde457b190ade6f737d19b0d34cf19b00d3..f33d72c5b06e123dd97c11933467b3197877d1d5 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -2158,6 +2158,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb) err = __nf_conntrack_update(net, skb, ct, ctinfo); if (err < 0) return err; + + ct = nf_ct_get(skb, &ctinfo); } return nf_confirm_cthelper(skb, ct, ctinfo); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7647ecfa0d4078e6fd62d73238d52b61f29a7795..88325b264737f3e3c32feb8c40f98988cbba98e2 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -188,24 +188,6 @@ static void nft_netdev_unregister_hooks(struct net *net, nf_unregister_net_hook(net, &hook->ops); } -static int nft_register_basechain_hooks(struct net *net, int family, - struct nft_base_chain *basechain) -{ - if (family == NFPROTO_NETDEV) - return nft_netdev_register_hooks(net, &basechain->hook_list); - - return nf_register_net_hook(net, &basechain->ops); -} - -static void nft_unregister_basechain_hooks(struct net *net, int family, - struct nft_base_chain *basechain) -{ - if (family == NFPROTO_NETDEV) - nft_netdev_unregister_hooks(net, &basechain->hook_list); - else - nf_unregister_net_hook(net, &basechain->ops); -} - static int nf_tables_register_hook(struct net *net, const struct nft_table *table, struct nft_chain *chain) @@ -223,7 +205,10 @@ static int nf_tables_register_hook(struct net *net, if (basechain->type->ops_register) return basechain->type->ops_register(net, ops); - return nft_register_basechain_hooks(net, table->family, basechain); + if (table->family == NFPROTO_NETDEV) + return nft_netdev_register_hooks(net, &basechain->hook_list); + + return nf_register_net_hook(net, &basechain->ops); } static void nf_tables_unregister_hook(struct net *net, @@ -242,7 +227,10 @@ static void nf_tables_unregister_hook(struct net *net, if (basechain->type->ops_unregister) return basechain->type->ops_unregister(net, ops); - nft_unregister_basechain_hooks(net, table->family, basechain); + if (table->family == NFPROTO_NETDEV) + nft_netdev_unregister_hooks(net, &basechain->hook_list); + else + nf_unregister_net_hook(net, &basechain->ops); } static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) @@ -832,8 +820,7 @@ static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt) if (cnt && i++ == cnt) break; - nft_unregister_basechain_hooks(net, table->family, - nft_base_chain(chain)); + nf_tables_unregister_hook(net, table, chain); } } @@ -848,8 +835,7 @@ static int nf_tables_table_enable(struct net *net, struct nft_table *table) if (!nft_is_base_chain(chain)) continue; - err = nft_register_basechain_hooks(net, table->family, - nft_base_chain(chain)); + err = nf_tables_register_hook(net, table, chain); if (err < 0) goto err_register_hooks; @@ -894,11 +880,12 @@ static int nf_tables_updtable(struct nft_ctx *ctx) nft_trans_table_enable(trans) = false; } else if (!(flags & NFT_TABLE_F_DORMANT) && ctx->table->flags & NFT_TABLE_F_DORMANT) { + ctx->table->flags &= ~NFT_TABLE_F_DORMANT; ret = nf_tables_table_enable(ctx->net, ctx->table); - if (ret >= 0) { - 
ctx->table->flags &= ~NFT_TABLE_F_DORMANT; + if (ret >= 0) nft_trans_table_enable(trans) = true; - } + else + ctx->table->flags |= NFT_TABLE_F_DORMANT; } if (ret < 0) goto err; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 55ee680e9db180b2d2e6d56db194c006df8049d8..9395ee8a868dbedba93547cb7d948c53b21ec9cb 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -351,22 +351,11 @@ int genl_register_family(struct genl_family *family) start = end = GENL_ID_VFS_DQUOT; } - if (family->maxattr && !family->parallel_ops) { - family->attrbuf = kmalloc_array(family->maxattr + 1, - sizeof(struct nlattr *), - GFP_KERNEL); - if (family->attrbuf == NULL) { - err = -ENOMEM; - goto errout_locked; - } - } else - family->attrbuf = NULL; - family->id = idr_alloc_cyclic(&genl_fam_idr, family, start, end + 1, GFP_KERNEL); if (family->id < 0) { err = family->id; - goto errout_free; + goto errout_locked; } err = genl_validate_assign_mc_groups(family); @@ -385,8 +374,6 @@ int genl_register_family(struct genl_family *family) errout_remove: idr_remove(&genl_fam_idr, family->id); -errout_free: - kfree(family->attrbuf); errout_locked: genl_unlock_all(); return err; @@ -419,8 +406,6 @@ int genl_unregister_family(const struct genl_family *family) atomic_read(&genl_sk_destructing_cnt) == 0); genl_unlock(); - kfree(family->attrbuf); - genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0); return 0; @@ -485,30 +470,23 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family, if (!family->maxattr) return NULL; - if (family->parallel_ops) { - attrbuf = kmalloc_array(family->maxattr + 1, - sizeof(struct nlattr *), GFP_KERNEL); - if (!attrbuf) - return ERR_PTR(-ENOMEM); - } else { - attrbuf = family->attrbuf; - } + attrbuf = kmalloc_array(family->maxattr + 1, + sizeof(struct nlattr *), GFP_KERNEL); + if (!attrbuf) + return ERR_PTR(-ENOMEM); err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr, family->policy, validate, extack); if (err) { - if (family->parallel_ops) - kfree(attrbuf); + kfree(attrbuf); return ERR_PTR(err); } return attrbuf; } -static void genl_family_rcv_msg_attrs_free(const struct genl_family *family, - struct nlattr **attrbuf) +static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf) { - if (family->parallel_ops) - kfree(attrbuf); + kfree(attrbuf); } struct genl_start_context { @@ -542,7 +520,7 @@ static int genl_start(struct netlink_callback *cb) no_attrs: info = genl_dumpit_info_alloc(); if (!info) { - genl_family_rcv_msg_attrs_free(ctx->family, attrs); + genl_family_rcv_msg_attrs_free(attrs); return -ENOMEM; } info->family = ctx->family; @@ -559,7 +537,7 @@ static int genl_start(struct netlink_callback *cb) } if (rc) { - genl_family_rcv_msg_attrs_free(info->family, info->attrs); + genl_family_rcv_msg_attrs_free(info->attrs); genl_dumpit_info_free(info); cb->data = NULL; } @@ -588,7 +566,7 @@ static int genl_lock_done(struct netlink_callback *cb) rc = ops->done(cb); genl_unlock(); } - genl_family_rcv_msg_attrs_free(info->family, info->attrs); + genl_family_rcv_msg_attrs_free(info->attrs); genl_dumpit_info_free(info); return rc; } @@ -601,7 +579,7 @@ static int genl_parallel_done(struct netlink_callback *cb) if (ops->done) rc = ops->done(cb); - genl_family_rcv_msg_attrs_free(info->family, info->attrs); + genl_family_rcv_msg_attrs_free(info->attrs); genl_dumpit_info_free(info); return rc; } @@ -694,7 +672,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family, family->post_doit(ops, skb, &info); out: - 
genl_family_rcv_msg_attrs_free(family, attrbuf); + genl_family_rcv_msg_attrs_free(attrbuf); return err; } @@ -1166,60 +1144,11 @@ static struct genl_family genl_ctrl __ro_after_init = { .netnsok = true, }; -static int genl_bind(struct net *net, int group) -{ - struct genl_family *f; - int err = -ENOENT; - unsigned int id; - - down_read(&cb_lock); - - idr_for_each_entry(&genl_fam_idr, f, id) { - if (group >= f->mcgrp_offset && - group < f->mcgrp_offset + f->n_mcgrps) { - int fam_grp = group - f->mcgrp_offset; - - if (!f->netnsok && net != &init_net) - err = -ENOENT; - else if (f->mcast_bind) - err = f->mcast_bind(net, fam_grp); - else - err = 0; - break; - } - } - up_read(&cb_lock); - - return err; -} - -static void genl_unbind(struct net *net, int group) -{ - struct genl_family *f; - unsigned int id; - - down_read(&cb_lock); - - idr_for_each_entry(&genl_fam_idr, f, id) { - if (group >= f->mcgrp_offset && - group < f->mcgrp_offset + f->n_mcgrps) { - int fam_grp = group - f->mcgrp_offset; - - if (f->mcast_unbind) - f->mcast_unbind(net, fam_grp); - break; - } - } - up_read(&cb_lock); -} - static int __net_init genl_pernet_init(struct net *net) { struct netlink_kernel_cfg cfg = { .input = genl_rcv, .flags = NL_CFG_F_NONROOT_RECV, - .bind = genl_bind, - .unbind = genl_unbind, }; /* we'll bump the group number right afterwards */ diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 7cd5248843041a414d2c6374c94001549bf1d7da..78ea8c94dcba05e99d9665232307d4a5bf66ff1b 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1228,10 +1228,13 @@ int nci_register_device(struct nci_dev *ndev) rc = nfc_register_device(ndev->nfc_dev); if (rc) - goto destroy_rx_wq_exit; + goto destroy_tx_wq_exit; goto exit; +destroy_tx_wq_exit: + destroy_workqueue(ndev->tx_wq); + destroy_rx_wq_exit: destroy_workqueue(ndev->rx_wq); diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 2d8d6131bc5f7e23b168af2dcd377d55c278cd8e..300a104b9a0fb02ba3e7224a0f11a91acb6e05d6 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -166,6 +166,7 @@ static void __qrtr_node_release(struct kref *kref) { struct qrtr_node *node = container_of(kref, struct qrtr_node, ref); struct radix_tree_iter iter; + struct qrtr_tx_flow *flow; unsigned long flags; void __rcu **slot; @@ -181,8 +182,9 @@ static void __qrtr_node_release(struct kref *kref) /* Free tx flow counters */ radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) { + flow = *slot; radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot); - kfree(*slot); + kfree(flow); } kfree(node); } @@ -427,7 +429,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) unsigned int ver; size_t hdrlen; - if (len & 3) + if (len == 0 || len & 3) return -EINVAL; skb = netdev_alloc_skb(NULL, len); @@ -441,6 +443,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) switch (ver) { case QRTR_PROTO_VER_1: + if (len < sizeof(*v1)) + goto err; v1 = data; hdrlen = sizeof(*v1); @@ -454,6 +458,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len) size = le32_to_cpu(v1->size); break; case QRTR_PROTO_VER_2: + if (len < sizeof(*v2)) + goto err; v2 = data; hdrlen = sizeof(*v2) + v2->optlen; @@ -1174,6 +1180,7 @@ static int qrtr_release(struct socket *sock) sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); + sock_orphan(sk); sock->sk = NULL; if (!sock_flag(sk, SOCK_ZAPPED)) diff --git a/net/rds/connection.c b/net/rds/connection.c index 
ed7f2133acc2fb12a5419d0a96ca435dc7e3da78..f2fcab182095cbc39f991a4d2886aa2237aec1c8 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -905,6 +905,17 @@ void rds_conn_path_connect_if_down(struct rds_conn_path *cp) } EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down); +/* Check connectivity of all paths + */ +void rds_check_all_paths(struct rds_connection *conn) +{ + int i = 0; + + do { + rds_conn_path_connect_if_down(&conn->c_path[i]); + } while (++i < conn->c_npaths); +} + void rds_conn_connect_if_down(struct rds_connection *conn) { WARN_ON(conn->c_trans->t_mp_capable); diff --git a/net/rds/rds.h b/net/rds/rds.h index 6019b0c004a9df23fbaafd614c8b03492485b294..106e862996b94d377717745523b74b6db07e6a58 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -778,6 +778,7 @@ void rds_conn_drop(struct rds_connection *conn); void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy); void rds_conn_connect_if_down(struct rds_connection *conn); void rds_conn_path_connect_if_down(struct rds_conn_path *cp); +void rds_check_all_paths(struct rds_connection *conn); void rds_for_each_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens, @@ -822,6 +823,12 @@ rds_conn_path_up(struct rds_conn_path *cp) return atomic_read(&cp->cp_state) == RDS_CONN_UP; } +static inline int +rds_conn_path_down(struct rds_conn_path *cp) +{ + return atomic_read(&cp->cp_state) == RDS_CONN_DOWN; +} + static inline int rds_conn_up(struct rds_connection *conn) { diff --git a/net/rds/recv.c b/net/rds/recv.c index c8404971d5ab337a39e344444c32887fc61616a2..aba4afe4dfedc22a6bdb45893f07a4d76f6cc2ed 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ + struct rds_rdma_notify cmsg; unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); int err = 0; + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ /* put_cmsg copies to user space and thus may sleep. 
We can't do this * with rs_lock held, so first grab as many notifications as we can stuff diff --git a/net/rds/send.c b/net/rds/send.c index 68e2bdb08fd099fd930d0ea66ae037af6a3ba8d2..9a529a01cdc6a10a13d08efcd32947bf22290849 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1340,7 +1340,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) goto out; } - rds_conn_path_connect_if_down(cpath); + if (rds_conn_path_down(cpath)) + rds_check_all_paths(conn); ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); if (ret) { diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index f07970207b5447079c71d9cbd88501f07cafa53f..38a46167523fa435e673483878a4ea145a9c2e8b 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ ret = rxrpc_connect_call(rx, call, cp, srx, gfp); if (ret < 0) - goto error; + goto error_attached_to_socket; trace_rxrpc_call(call->debug_id, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); @@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); - ret = -EEXIST; - -error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); + RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call->debug_id, rxrpc_call_error, - atomic_read(&call->usage), here, ERR_PTR(ret)); + atomic_read(&call->usage), here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = -EEXIST"); + return ERR_PTR(-EEXIST); + + /* We got an error, but the call is attached to the socket and is in + * need of release. However, we might now race with recvmsg() when + * completing the call queues it. Return 0 from sys_sendmsg() and + * leave the error to recvmsg() to deal with. 
+ */ +error_attached_to_socket: + trace_rxrpc_call(call->debug_id, rxrpc_call_error, + atomic_read(&call->usage), here, ERR_PTR(ret)); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); + __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + RX_CALL_DEAD, ret); + _leave(" = c=%08x [err]", call->debug_id); + return call; } /* diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index 19e141eeed17dca26e90450cc12dc8c3728b9798..8cbe0bf20ed533546805d87642ba5af698b13663 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) call->peer->cong_cwnd = call->cong_cwnd; - spin_lock_bh(&conn->params.peer->lock); - hlist_del_rcu(&call->error_link); - spin_unlock_bh(&conn->params.peer->lock); + if (!hlist_unhashed(&call->error_link)) { + spin_lock_bh(&call->peer->lock); + hlist_del_rcu(&call->error_link); + spin_unlock_bh(&call->peer->lock); + } if (rxrpc_is_client_call(call)) return rxrpc_disconnect_client_call(call); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 2989742a4aa118eb433d1e1dfe8013b7c55c85f4..efecc5a8f67d74a4f1c2ee15d8e72fb3dbea9f41 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -543,7 +543,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, list_empty(&rx->recvmsg_q) && rx->sk.sk_state != RXRPC_SERVER_LISTENING) { release_sock(&rx->sk); - return -ENODATA; + return -EAGAIN; } if (list_empty(&rx->recvmsg_q)) { @@ -620,7 +620,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, goto error_unlock_call; } - if (msg->msg_name) { + if (msg->msg_name && call->peer) { struct sockaddr_rxrpc *srx = msg->msg_name; size_t len = sizeof(call->peer->srx); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 1304b8608f56e4862143f4dd00020c2874b639f8..f3f6da6e4ad29cabd9c767ce63334555f8d952ac 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -304,7 +304,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, /* this should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) + if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; more = msg->msg_flags & MSG_MORE; @@ -681,6 +681,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (IS_ERR(call)) return PTR_ERR(call); /* ... and we have the call lock. 
*/ + ret = 0; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) + goto out_put_unlock; } else { switch (READ_ONCE(call->state)) { case RXRPC_CALL_UNINITIALISED: diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 43a243081e7d2a86624d31029dae20957f78f11c..f901421b0634d4408d4d7cc6881fa5e1716a471a 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c @@ -43,17 +43,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a, tcf_lastuse_update(&ca->tcf_tm); bstats_update(&ca->tcf_bstats, skb); - if (skb->protocol == htons(ETH_P_IP)) { + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): if (skb->len < sizeof(struct iphdr)) goto out; proto = NFPROTO_IPV4; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + break; + case htons(ETH_P_IPV6): if (skb->len < sizeof(struct ipv6hdr)) goto out; proto = NFPROTO_IPV6; - } else { + break; + default: goto out; } diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index cb8608f0a77a2a88671da430c79399c1fab0d77d..c60674cf25c4fd5d30484f72cc23e974a285aa23 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -587,7 +587,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a, goto drop; update_flags = params->update_flags; - protocol = tc_skb_protocol(skb); + protocol = skb_protocol(skb, false); again: switch (protocol) { case cpu_to_be16(ETH_P_IP): diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index e9f3576cbf71ab702209337259f2105921497714..6ed1652d1e265b379cd8de9eed17e3495a40852f 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -624,7 +624,7 @@ static u8 tcf_ct_skb_nf_family(struct sk_buff *skb) { u8 family = NFPROTO_UNSPEC; - switch (skb->protocol) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): family = NFPROTO_IPV4; break; @@ -673,9 +673,10 @@ static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag) } static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, - u8 family, u16 zone) + u8 family, u16 zone, bool *defrag) { enum ip_conntrack_info ctinfo; + struct qdisc_skb_cb cb; struct nf_conn *ct; int err = 0; bool frag; @@ -693,6 +694,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, return err; skb_get(skb); + cb = *qdisc_skb_cb(skb); if (family == NFPROTO_IPV4) { enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone; @@ -703,6 +705,9 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, local_bh_enable(); if (err && err != -EINPROGRESS) goto out_free; + + if (!err) + *defrag = true; } else { /* NFPROTO_IPV6 */ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; @@ -711,12 +716,16 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, err = nf_ct_frag6_gather(net, skb, user); if (err && err != -EINPROGRESS) goto out_free; + + if (!err) + *defrag = true; #else err = -EOPNOTSUPP; goto out_free; #endif } + *qdisc_skb_cb(skb) = cb; skb_clear_hash(skb); skb->ignore_df = 1; return err; @@ -748,6 +757,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, const struct nf_nat_range2 *range, enum nf_nat_manip_type maniptype) { + __be16 proto = skb_protocol(skb, true); int hooknum, err = NF_ACCEPT; /* See HOOK2MANIP(). 
*/ @@ -759,14 +769,13 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, switch (ctinfo) { case IP_CT_RELATED: case IP_CT_RELATED_REPLY: - if (skb->protocol == htons(ETH_P_IP) && + if (proto == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_ICMP) { if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, hooknum)) err = NF_DROP; goto out; - } else if (IS_ENABLED(CONFIG_IPV6) && - skb->protocol == htons(ETH_P_IPV6)) { + } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) { __be16 frag_off; u8 nexthdr = ipv6_hdr(skb)->nexthdr; int hdrlen = ipv6_skip_exthdr(skb, @@ -914,6 +923,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, int nh_ofs, err, retval; struct tcf_ct_params *p; bool skip_add = false; + bool defrag = false; struct nf_conn *ct; u8 family; @@ -925,6 +935,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, force = p->ct_action & TCA_CT_ACT_FORCE; tmpl = p->tmpl; + tcf_lastuse_update(&c->tcf_tm); + if (clear) { ct = nf_ct_get(skb, &ctinfo); if (ct) { @@ -944,7 +956,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, */ nh_ofs = skb_network_offset(skb); skb_pull_rcsum(skb, nh_ofs); - err = tcf_ct_handle_fragments(net, skb, family, p->zone); + err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag); if (err == -EINPROGRESS) { retval = TC_ACT_STOLEN; goto out; @@ -1012,6 +1024,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, out: tcf_action_update_bstats(&c->common, skb); + if (defrag) + qdisc_skb_cb(skb)->pkt_len = skb->len; return retval; drop: @@ -1529,10 +1543,10 @@ static int __init ct_init_module(void) return 0; -err_tbl_init: - destroy_workqueue(act_ct_wq); err_register: tcf_ct_flow_tables_uninit(); +err_tbl_init: + destroy_workqueue(act_ct_wq); return err; } @@ -1550,4 +1564,3 @@ MODULE_AUTHOR("Yossi Kuperman "); MODULE_AUTHOR("Marcelo Ricardo Leitner "); MODULE_DESCRIPTION("Connection tracking action"); MODULE_LICENSE("GPL v2"); - diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c index 19649623493b158b3008c82ce2409ae80ffa6dc6..b5042f3ea079e2e4d77bb0786c3761ec5e93f7da 100644 --- a/net/sched/act_ctinfo.c +++ b/net/sched/act_ctinfo.c @@ -96,19 +96,22 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a, action = READ_ONCE(ca->tcf_action); wlen = skb_network_offset(skb); - if (tc_skb_protocol(skb) == htons(ETH_P_IP)) { + switch (skb_protocol(skb, true)) { + case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen)) goto out; proto = NFPROTO_IPV4; - } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) { + break; + case htons(ETH_P_IPV6): wlen += sizeof(struct ipv6hdr); if (!pskb_may_pull(skb, wlen)) goto out; proto = NFPROTO_IPV6; - } else { + break; + default: goto out; } diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index be3f215cd0277b1a32e8da8634b5edd2f32b25af..8118e26409796aff8ccfe8569470b829dd6e6ad0 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -82,7 +82,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, goto drop; break; case TCA_MPLS_ACT_PUSH: - new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol)); + new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true))); if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len, skb->dev && skb->dev->type == ARPHRD_ETHER)) goto drop; diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 
b125b2be4467a46c57208c21385a41bf722f6fc4..b2b3faa57294c5caed90461ea74be8038c5352ad 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -41,7 +41,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, if (params->flags & SKBEDIT_F_INHERITDSFIELD) { int wlen = skb_network_offset(skb); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen)) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index faa78b7dd96220ba4e222757eef597e9e704750d..4619cb3cb0a8f51f04e008bb1cbdd25934bc0c06 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -1538,7 +1537,7 @@ static inline int __tcf_classify(struct sk_buff *skb, reclassify: #endif for (; tp; tp = rcu_dereference_bh(tp->next)) { - __be16 protocol = tc_skb_protocol(skb); + __be16 protocol = skb_protocol(skb, false); int err; if (tp->protocol != protocol && diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 80ae7b9fa90affd5bae5647aada9c556d1f2e65b..ab53a93b2f2ba94c5b8dbad647bc6f6f6a8e5465 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -80,7 +80,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) if (dst) return ntohl(dst); - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_proto(const struct sk_buff *skb, @@ -104,7 +104,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb, if (flow->ports.ports) return ntohs(flow->ports.dst); - return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); + return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true); } static u32 flow_get_iif(const struct sk_buff *skb) @@ -151,7 +151,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb) static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) { - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, src.u3.ip)); case htons(ETH_P_IPV6): @@ -164,7 +164,7 @@ static u32 flow_get_nfct_src(const struct sk_buff *skb, static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) { - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): return ntohl(CTTUPLE(skb, dst.u3.ip)); case htons(ETH_P_IPV6): diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index b2da3728608225a05e40c8a8c1d48964aa944056..e30bd969fc485e7c018463a7333949f32ff055ab 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -313,7 +313,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* skb_flow_dissect() does not set n_proto in case an unknown * protocol, so do it rather here. 
*/ - skb_key.basic.n_proto = skb->protocol; + skb_key.basic.n_proto = skb_protocol(skb, false); skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, fl_ct_info_to_flower_map, diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c index df00566d327de89788a00c96a4db693415b8ded4..c95cf86fb431ab8e4c82a0914fa3bbc401e17c6d 100644 --- a/net/sched/em_ipset.c +++ b/net/sched/em_ipset.c @@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em, }; int ret, network_offset; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): state.pf = NFPROTO_IPV4; if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) diff --git a/net/sched/em_ipt.c b/net/sched/em_ipt.c index 18755d29fd1594040dfea24387be8b48c8350241..3650117da47f129df4e5e5b01b68ca0dec2bc7d6 100644 --- a/net/sched/em_ipt.c +++ b/net/sched/em_ipt.c @@ -212,7 +212,7 @@ static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em, struct nf_hook_state state; int ret; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) return 0; diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index d99966a55c84fa0f5142ed72faeceb9baab86f5e..46254968d390fc50a1b77c1bf94971ded5233142 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -195,7 +195,7 @@ META_COLLECTOR(int_priority) META_COLLECTOR(int_protocol) { /* Let userspace take care of the byte ordering */ - dst->value = tc_skb_protocol(skb); + dst->value = skb_protocol(skb, false); } META_COLLECTOR(int_pkttype) diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index ee12ca9f55b4fd59a6c0b826ad961ac3984c3ba2..1c281cc81f57789b787c77622cc2c026414f9c9d 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -553,16 +553,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt, if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); + p->link.vcc = NULL; + p->link.sock = NULL; + p->link.common.classid = sch->handle; + p->link.ref = 1; err = tcf_block_get(&p->link.block, &p->link.filter_list, sch, extack); if (err) return err; - p->link.vcc = NULL; - p->link.sock = NULL; - p->link.common.classid = sch->handle; - p->link.ref = 1; tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); return 0; } diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index ca813697728eb795ff0fcd97c63cbe5d64c9f925..ebaeec1e5c82d99b25ac8bdfe24a878f52469e4b 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -592,7 +592,7 @@ static bool cake_update_flowkeys(struct flow_keys *keys, bool rev = !skb->_nfct, upd = false; __be32 ip; - if (tc_skb_protocol(skb) != htons(ETH_P_IP)) + if (skb_protocol(skb, true) != htons(ETH_P_IP)) return false; if (!nf_ct_get_tuple_skb(&tuple, skb)) @@ -1557,7 +1557,7 @@ static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash) u16 *buf, buf_; u8 dscp; - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_); if (unlikely(!buf)) diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 05605b30bef3abac1da2e0e821c871a54b3635ba..2b88710994d71e5339f82ef3fe53ecce961ea42f 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (p->set_tc_index) { int wlen = skb_network_offset(skb); - 
switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): wlen += sizeof(struct iphdr); if (!pskb_may_pull(skb, wlen) || @@ -303,7 +303,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) index = skb->tc_index & (p->indices - 1); pr_debug("index %d->%d\n", skb->tc_index, index); - switch (tc_skb_protocol(skb)) { + switch (skb_protocol(skb, true)) { case htons(ETH_P_IP): ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask, p->mv[index].value); @@ -320,7 +320,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) */ if (p->mv[index].mask != 0xff || p->mv[index].value) pr_warn("%s: unsupported protocol %d\n", - __func__, ntohs(tc_skb_protocol(skb))); + __func__, ntohs(skb_protocol(skb, true))); break; } diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 689ef6f3ded80968a46aaccc9dc6fd4db9506a85..2f1f0a3784083088bf9cfeb3b4c84e1391fb9e54 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -239,7 +239,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, char haddr[MAX_ADDR_LEN]; neigh_ha_snapshot(haddr, n, dev); - err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)), + err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)), haddr, NULL, skb->len); if (err < 0) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 67f7e71f91298811bfecea8070bc73c427a80872..bda2536dd740f7efe7b19dfc17cfc939bae38f1f 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -22,17 +22,11 @@ #include #include -/* Migrates chunks from stream queues to new stream queues if needed, - * but not across associations. Also, removes those chunks to streams - * higher than the new max. - */ -static void sctp_stream_outq_migrate(struct sctp_stream *stream, - struct sctp_stream *new, __u16 outcnt) +static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt) { struct sctp_association *asoc; struct sctp_chunk *ch, *temp; struct sctp_outq *outq; - int i; asoc = container_of(stream, struct sctp_association, stream); outq = &asoc->outqueue; @@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, sctp_chunk_free(ch); } +} + +/* Migrates chunks from stream queues to new stream queues if needed, + * but not across associations. Also, removes those chunks to streams + * higher than the new max. 
+ */ +static void sctp_stream_outq_migrate(struct sctp_stream *stream, + struct sctp_stream *new, __u16 outcnt) +{ + int i; + + if (stream->outcnt > outcnt) + sctp_stream_shrink_out(stream, outcnt); if (new) { /* Here we actually move the old ext stuff into the new @@ -1037,11 +1044,13 @@ struct sctp_chunk *sctp_process_strreset_resp( nums = ntohs(addstrm->number_of_streams); number = stream->outcnt - nums; - if (result == SCTP_STRRESET_PERFORMED) + if (result == SCTP_STRRESET_PERFORMED) { for (i = number; i < stream->outcnt; i++) SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; - else + } else { + sctp_stream_shrink_out(stream, number); stream->outcnt = number; + } *evp = sctp_ulpevent_make_stream_change_event(asoc, flags, 0, nums, GFP_ATOMIC); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 9033215438384b8f4a8b9872184c082d58c5afeb..1163d51196da996dac190701b9f31618a4406e46 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -126,8 +126,10 @@ EXPORT_SYMBOL_GPL(smc_proto6); static void smc_restore_fallback_changes(struct smc_sock *smc) { - smc->clcsock->file->private_data = smc->sk.sk_socket; - smc->clcsock->file = NULL; + if (smc->clcsock->file) { /* non-accepted sockets have no file yet */ + smc->clcsock->file->private_data = smc->sk.sk_socket; + smc->clcsock->file = NULL; + } } static int __smc_release(struct smc_sock *smc) @@ -352,7 +354,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link, */ mutex_lock(&lgr->llc_conf_mutex); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].state != SMC_LNK_ACTIVE) + if (!smc_link_active(&lgr->lnk[i])) continue; rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc); if (rc) @@ -632,7 +634,9 @@ static int smc_connect_rdma(struct smc_sock *smc, for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *l = &smc->conn.lgr->lnk[i]; - if (l->peer_qpn == ntoh24(aclc->qpn)) { + if (l->peer_qpn == ntoh24(aclc->qpn) && + !memcmp(l->peer_gid, &aclc->lcl.gid, SMC_GID_SIZE) && + !memcmp(l->peer_mac, &aclc->lcl.mac, sizeof(l->peer_mac))) { link = l; break; } diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index a47e8855e0454594e7726989c72979e314c14bc0..ce468ff62a1915dc117f75ef2eb24ee3e45b7e07 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -66,9 +66,13 @@ int smc_cdc_get_free_slot(struct smc_connection *conn, rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, wr_rdma_buf, (struct smc_wr_tx_pend_priv **)pend); - if (conn->killed) + if (conn->killed) { /* abnormal termination */ + if (!rc) + smc_wr_tx_put_slot(link, + (struct smc_wr_tx_pend_priv *)pend); rc = -EPIPE; + } return rc; } diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index d5627df24215d934d4a2ce55619f5e863d73589b..779f4142a11d8b377758586070e1b881baaf4379 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -27,6 +27,7 @@ #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48 +#define SMC_CLC_RECV_BUF_LEN 100 /* eye catcher "SMCR" EBCDIC for CLC messages */ static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'}; @@ -36,7 +37,7 @@ static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'}; /* check if received message has a correct header length and contains valid * heading and trailing eyecatchers */ -static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) +static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl) { struct smc_clc_msg_proposal_prefix *pclc_prfx; struct smc_clc_msg_accept_confirm *clc; @@ -49,12 +50,9 @@ static bool smc_clc_msg_hdr_valid(struct 
smc_clc_msg_hdr *clcm) return false; switch (clcm->type) { case SMC_CLC_PROPOSAL: - if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D && - clcm->path != SMC_TYPE_B) - return false; pclc = (struct smc_clc_msg_proposal *)clcm; pclc_prfx = smc_clc_proposal_get_prefix(pclc); - if (ntohs(pclc->hdr.length) != + if (ntohs(pclc->hdr.length) < sizeof(*pclc) + ntohs(pclc->iparea_offset) + sizeof(*pclc_prfx) + pclc_prfx->ipv6_prefixes_cnt * @@ -86,7 +84,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm) default: return false; } - if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && + if (check_trl && + memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) && memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER))) return false; return true; @@ -276,7 +275,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, struct msghdr msg = {NULL, 0}; int reason_code = 0; struct kvec vec = {buf, buflen}; - int len, datlen; + int len, datlen, recvlen; + bool check_trl = true; int krflags; /* peek the first few bytes to determine length of data to receive @@ -320,10 +320,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, } datlen = ntohs(clcm->length); if ((len < sizeof(struct smc_clc_msg_hdr)) || - (datlen > buflen) || - (clcm->version != SMC_CLC_V1) || - (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D && - clcm->path != SMC_TYPE_B) || + (clcm->version < SMC_CLC_V1) || ((clcm->type != SMC_CLC_DECLINE) && (clcm->type != expected_type))) { smc->sk.sk_err = EPROTO; @@ -331,16 +328,38 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, goto out; } + if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N) + reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */ + /* receive the complete CLC message */ memset(&msg, 0, sizeof(struct msghdr)); - iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen); + if (datlen > buflen) { + check_trl = false; + recvlen = buflen; + } else { + recvlen = datlen; + } + iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen); krflags = MSG_WAITALL; len = sock_recvmsg(smc->clcsock, &msg, krflags); - if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) { + if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) { smc->sk.sk_err = EPROTO; reason_code = -EPROTO; goto out; } + datlen -= len; + while (datlen) { + u8 tmp[SMC_CLC_RECV_BUF_LEN]; + + vec.iov_base = &tmp; + vec.iov_len = SMC_CLC_RECV_BUF_LEN; + /* receive remaining proposal message */ + recvlen = datlen > SMC_CLC_RECV_BUF_LEN ? + SMC_CLC_RECV_BUF_LEN : datlen; + iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen); + len = sock_recvmsg(smc->clcsock, &msg, krflags); + datlen -= len; + } if (clcm->type == SMC_CLC_DECLINE) { struct smc_clc_msg_decline *dclc; diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 465876701b7556aedb32dfc85f2fba0f8ecb738f..76c2b150d040f3b4a10ae4f6e6c8d922a3107443 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -25,6 +25,7 @@ #define SMC_CLC_V1 0x1 /* SMC version */ #define SMC_TYPE_R 0 /* SMC-R only */ #define SMC_TYPE_D 1 /* SMC-D only */ +#define SMC_TYPE_N 2 /* neither SMC-R nor SMC-D */ #define SMC_TYPE_B 3 /* SMC-R and SMC-D */ #define CLC_WAIT_TIME (6 * HZ) /* max. 
wait time on clcsock */ #define CLC_WAIT_TIME_SHORT HZ /* short wait time on clcsock */ @@ -46,6 +47,7 @@ #define SMC_CLC_DECL_ISMVLANERR 0x03090000 /* err to reg vlan id on ism dev */ #define SMC_CLC_DECL_NOACTLINK 0x030a0000 /* no active smc-r link in lgr */ #define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */ +#define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */ #define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */ #define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */ #define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 7964a21e5e6fb51aef1abeeee2ae48c4c2ff8a8d..f82a2e59991719b54c67ca3c61ce607fdf76d5c6 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -44,18 +45,10 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */ static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */ static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted); -struct smc_ib_up_work { - struct work_struct work; - struct smc_link_group *lgr; - struct smc_ib_device *smcibdev; - u8 ibport; -}; - static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb, struct smc_buf_desc *buf_desc); static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft); -static void smc_link_up_work(struct work_struct *work); static void smc_link_down_work(struct work_struct *work); /* return head of link group list and its lock for a given link group */ @@ -247,7 +240,8 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr) if (smc_link_usable(lnk)) lnk->state = SMC_LNK_INACTIVE; } - wake_up_interruptible_all(&lgr->llc_waiter); + wake_up_all(&lgr->llc_msg_waiter); + wake_up_all(&lgr->llc_flow_waiter); } static void smc_lgr_free(struct smc_link_group *lgr); @@ -324,7 +318,6 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, get_device(&ini->ib_dev->ibdev->dev); atomic_inc(&ini->ib_dev->lnk_cnt); - lnk->state = SMC_LNK_ACTIVATING; lnk->link_id = smcr_next_link_id(lgr); lnk->lgr = lgr; lnk->link_idx = link_idx; @@ -360,6 +353,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, rc = smc_wr_create_link(lnk); if (rc) goto destroy_qp; + lnk->state = SMC_LNK_ACTIVATING; return 0; destroy_qp: @@ -450,7 +444,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) } smc->conn.lgr = lgr; spin_lock_bh(lgr_lock); - list_add(&lgr->list, lgr_list); + list_add_tail(&lgr->list, lgr_list); spin_unlock_bh(lgr_lock); return 0; @@ -548,8 +542,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr, smc_wr_wakeup_tx_wait(from_lnk); for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].state != SMC_LNK_ACTIVE || - i == from_lnk->link_idx) + if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx) continue; if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev && from_lnk->ibport == lgr->lnk[i].ibport) { @@ -1104,66 +1097,23 @@ static void smc_conn_abort_work(struct work_struct *work) sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */ } -/* link is up - establish alternate link if applicable */ -static void smcr_link_up(struct smc_link_group *lgr, - struct smc_ib_device *smcibdev, u8 ibport) -{ - struct smc_link *link = NULL; - - if (list_empty(&lgr->list) || - lgr->type == SMC_LGR_SYMMETRIC || - lgr->type == SMC_LGR_ASYMMETRIC_PEER) - return; - - if 
(lgr->role == SMC_SERV) { - /* trigger local add link processing */ - link = smc_llc_usable_link(lgr); - if (!link) - return; - smc_llc_srv_add_link_local(link); - } else { - /* invite server to start add link processing */ - u8 gid[SMC_GID_SIZE]; - - if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid, - NULL)) - return; - if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { - /* some other llc task is ongoing */ - wait_event_interruptible_timeout(lgr->llc_waiter, - (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), - SMC_LLC_WAIT_TIME); - } - if (list_empty(&lgr->list) || - !smc_ib_port_active(smcibdev, ibport)) - return; /* lgr or device no longer active */ - link = smc_llc_usable_link(lgr); - if (!link) - return; - smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid, - NULL, SMC_LLC_REQ); - } -} - void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport) { - struct smc_ib_up_work *ib_work; struct smc_link_group *lgr, *n; list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) { + struct smc_link *link; + if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id, SMC_MAX_PNETID_LEN) || lgr->type == SMC_LGR_SYMMETRIC || lgr->type == SMC_LGR_ASYMMETRIC_PEER) continue; - ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL); - if (!ib_work) - continue; - INIT_WORK(&ib_work->work, smc_link_up_work); - ib_work->lgr = lgr; - ib_work->smcibdev = smcibdev; - ib_work->ibport = ibport; - schedule_work(&ib_work->work); + + /* trigger local add link processing */ + link = smc_llc_usable_link(lgr); + if (link) + smc_llc_add_link_local(link); } } @@ -1195,13 +1145,19 @@ static void smcr_link_down(struct smc_link *lnk) if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { /* another llc task is ongoing */ mutex_unlock(&lgr->llc_conf_mutex); - wait_event_interruptible_timeout(lgr->llc_waiter, - (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), + wait_event_timeout(lgr->llc_flow_waiter, + (list_empty(&lgr->list) || + lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE), SMC_LLC_WAIT_TIME); mutex_lock(&lgr->llc_conf_mutex); } - smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true, - SMC_LLC_DEL_LOST_PATH); + if (!list_empty(&lgr->list)) { + smc_llc_send_delete_link(to_lnk, del_link_id, + SMC_LLC_REQ, true, + SMC_LLC_DEL_LOST_PATH); + smcr_link_clear(lnk, true); + } + wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */ } } @@ -1240,20 +1196,6 @@ void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport) } } -static void smc_link_up_work(struct work_struct *work) -{ - struct smc_ib_up_work *ib_work = container_of(work, - struct smc_ib_up_work, - work); - struct smc_link_group *lgr = ib_work->lgr; - - if (list_empty(&lgr->list)) - goto out; - smcr_link_up(lgr, ib_work->smcibdev, ib_work->ibport); -out: - kfree(ib_work); -} - static void smc_link_down_work(struct work_struct *work) { struct smc_link *link = container_of(work, struct smc_link, @@ -1262,7 +1204,7 @@ static void smc_link_down_work(struct work_struct *work) if (list_empty(&lgr->list)) return; - wake_up_interruptible_all(&lgr->llc_waiter); + wake_up_all(&lgr->llc_msg_waiter); mutex_lock(&lgr->llc_conf_mutex); smcr_link_down(link); mutex_unlock(&lgr->llc_conf_mutex); @@ -1326,7 +1268,7 @@ static bool smcr_lgr_match(struct smc_link_group *lgr, return false; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].state != SMC_LNK_ACTIVE) + if (!smc_link_active(&lgr->lnk[i])) continue; if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) && !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) && @@ -1369,7 +1311,7 @@ 
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) && !lgr->sync_err && lgr->vlan_id == ini->vlan_id && - (role == SMC_CLNT || + (role == SMC_CLNT || ini->is_smcd || lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) { /* link group found */ ini->cln_first_contact = SMC_REUSE_CONTACT; @@ -1774,14 +1716,14 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn) { - if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk)) + if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) return; smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); } void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) { - if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk)) + if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk)) return; smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE); } @@ -1793,7 +1735,7 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn) if (!conn->lgr || conn->lgr->is_smcd) return; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (!smc_link_usable(&conn->lgr->lnk[i])) + if (!smc_link_active(&conn->lgr->lnk[i])) continue; smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc, DMA_FROM_DEVICE); @@ -1807,7 +1749,7 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn) if (!conn->lgr || conn->lgr->is_smcd) return; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (!smc_link_usable(&conn->lgr->lnk[i])) + if (!smc_link_active(&conn->lgr->lnk[i])) continue; smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc, DMA_FROM_DEVICE); @@ -1830,8 +1772,12 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd) return rc; /* create rmb */ rc = __smc_buf_create(smc, is_smcd, true); - if (rc) + if (rc) { + mutex_lock(&smc->conn.lgr->sndbufs_lock); + list_del(&smc->conn.sndbuf_desc->list); + mutex_unlock(&smc->conn.lgr->sndbufs_lock); smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc); + } return rc; } @@ -1955,20 +1901,20 @@ static void smc_core_going_away(void) struct smc_ib_device *smcibdev; struct smcd_dev *smcd; - spin_lock(&smc_ib_devices.lock); + mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(smcibdev, &smc_ib_devices.list, list) { int i; for (i = 0; i < SMC_MAX_PORTS; i++) set_bit(i, smcibdev->ports_going_away); } - spin_unlock(&smc_ib_devices.lock); + mutex_unlock(&smc_ib_devices.mutex); - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) { smcd->going_away = 1; } - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); } /* Clean up all SMC link groups */ @@ -1980,10 +1926,10 @@ static void smc_lgrs_shutdown(void) smc_smcr_terminate_all(NULL); - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) smc_smcd_terminate_all(smcd); - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); } static int smc_core_reboot_event(struct notifier_block *this, diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 86d160f0d187b450426f0623d812acd1be37cab8..1c4d5439d0ff283208ab92c0249abfe72075e216 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -262,8 +262,10 @@ struct smc_link_group { struct work_struct llc_del_link_work; struct work_struct llc_event_work; /* llc event worker */ - wait_queue_head_t llc_waiter; + wait_queue_head_t 
llc_flow_waiter; /* w4 next llc event */ + wait_queue_head_t llc_msg_waiter; + /* w4 next llc msg */ struct smc_llc_flow llc_flow_lcl; /* llc local control field */ struct smc_llc_flow llc_flow_rmt; @@ -347,6 +349,11 @@ static inline bool smc_link_usable(struct smc_link *lnk) return true; } +static inline bool smc_link_active(struct smc_link *lnk) +{ + return lnk->state == SMC_LNK_ACTIVE; +} + struct smc_sock; struct smc_clc_msg_accept_confirm; struct smc_clc_msg_local; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 562a52d01ad161749cb9bbd5a9398416bd292e55..1c314dbdc7faa450ead0cf45279f87a97f26f603 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -33,7 +34,7 @@ #define SMC_QP_RNR_RETRY 7 /* 7: infinite */ struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */ - .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock), + .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex), .list = LIST_HEAD_INIT(smc_ib_devices.list), }; @@ -505,6 +506,10 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) int cqe_size_order, smc_order; long rc; + mutex_lock(&smcibdev->mutex); + rc = 0; + if (smcibdev->initialized) + goto out; /* the calculated number of cq entries fits to mlx5 cq allocation */ cqe_size_order = cache_line_size() == 128 ? 7 : 6; smc_order = MAX_ORDER - cqe_size_order - 1; @@ -516,7 +521,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send); if (IS_ERR(smcibdev->roce_cq_send)) { smcibdev->roce_cq_send = NULL; - return rc; + goto out; } smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev, smc_wr_rx_cq_handler, NULL, @@ -528,21 +533,26 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev) } smc_wr_add_dev(smcibdev); smcibdev->initialized = 1; - return rc; + goto out; err: ib_destroy_cq(smcibdev->roce_cq_send); +out: + mutex_unlock(&smcibdev->mutex); return rc; } static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev) { + mutex_lock(&smcibdev->mutex); if (!smcibdev->initialized) - return; + goto out; smcibdev->initialized = 0; ib_destroy_cq(smcibdev->roce_cq_recv); ib_destroy_cq(smcibdev->roce_cq_send); smc_wr_remove_dev(smcibdev); +out: + mutex_unlock(&smcibdev->mutex); } static struct ib_client smc_ib_client; @@ -565,9 +575,10 @@ static int smc_ib_add_dev(struct ib_device *ibdev) INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work); atomic_set(&smcibdev->lnk_cnt, 0); init_waitqueue_head(&smcibdev->lnks_deleted); - spin_lock(&smc_ib_devices.lock); + mutex_init(&smcibdev->mutex); + mutex_lock(&smc_ib_devices.mutex); list_add_tail(&smcibdev->list, &smc_ib_devices.list); - spin_unlock(&smc_ib_devices.lock); + mutex_unlock(&smc_ib_devices.mutex); ib_set_client_data(ibdev, &smc_ib_client, smcibdev); INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev, smc_ib_global_event_handler); @@ -602,9 +613,9 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data) { struct smc_ib_device *smcibdev = client_data; - spin_lock(&smc_ib_devices.lock); + mutex_lock(&smc_ib_devices.mutex); list_del_init(&smcibdev->list); /* remove from smc_ib_devices */ - spin_unlock(&smc_ib_devices.lock); + mutex_unlock(&smc_ib_devices.mutex); pr_warn_ratelimited("smc: removing ib device %s\n", smcibdev->ibdev->name); smc_smcr_terminate_all(smcibdev); diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index e6a696ae15f3e51fe1d72ab8f0c271324532aecb..2ce481187dd0bec5bfb5a339bcac11c5eadbb553 100644 --- 
a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -25,7 +26,7 @@ struct smc_ib_devices { /* list of smc ib devices definition */ struct list_head list; - spinlock_t lock; /* protects list of smc ib devices */ + struct mutex mutex; /* protects list of smc ib devices */ }; extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */ @@ -51,6 +52,7 @@ struct smc_ib_device { /* ib-device infos for smc */ DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS); atomic_t lnk_cnt; /* number of links on ibdev */ wait_queue_head_t lnks_deleted; /* wait 4 removal of all links*/ + struct mutex mutex; /* protect dev setup+cleanup */ }; struct smc_buf_desc; diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index 91f85fc09fb8dacf2d87b118efedba9f6f558e0e..998c525de785690426929732bdfc73f32dc731b0 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c @@ -7,6 +7,7 @@ */ #include +#include #include #include @@ -17,7 +18,7 @@ struct smcd_dev_list smcd_dev_list = { .list = LIST_HEAD_INIT(smcd_dev_list.list), - .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock) + .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex) }; /* Test if an ISM communication is possible. */ @@ -317,9 +318,9 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev); int smcd_register_dev(struct smcd_dev *smcd) { - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); list_add_tail(&smcd->list, &smcd_dev_list.list); - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n", dev_name(&smcd->dev), smcd->pnetid, @@ -333,9 +334,9 @@ void smcd_unregister_dev(struct smcd_dev *smcd) { pr_warn_ratelimited("smc: removing smcd device %s\n", dev_name(&smcd->dev)); - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); list_del_init(&smcd->list); - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); smcd->going_away = 1; smc_smcd_terminate_all(smcd); flush_workqueue(smcd->event_wq); diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h index 4da946cbfa29c2d7c233f16c983eaf4b738bd6bb..81cc4537efd38592cf26e01c809f1752e48489bf 100644 --- a/net/smc/smc_ism.h +++ b/net/smc/smc_ism.h @@ -10,12 +10,13 @@ #define SMCD_ISM_H #include +#include #include "smc.h" struct smcd_dev_list { /* List of SMCD devices */ struct list_head list; - spinlock_t lock; /* Protects list of devices */ + struct mutex mutex; /* Protects list of devices */ }; extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */ diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 391237b601fed2e41f6fde6aa3bea34bebf180c8..df5b0a6ea8488ffb53fb5a4aa5b564d557ee4400 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -186,6 +186,26 @@ static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow, flow->qentry = qentry; } +static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type, + struct smc_llc_qentry *qentry) +{ + u8 msg_type = qentry->msg.raw.hdr.common.type; + + if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) && + flow_type != msg_type && !lgr->delayed_event) { + lgr->delayed_event = qentry; + return; + } + /* drop parallel or already-in-progress llc requests */ + if (flow_type != msg_type) + pr_warn_once("smc: SMC-R lg %*phN dropped parallel " + "LLC msg: msg %d flow %d role %d\n", + SMC_LGR_ID_SIZE, &lgr->id, + qentry->msg.raw.hdr.common.type, + flow_type, lgr->role); + kfree(qentry); +} + /* try to start a new llc flow, initiated by an incoming 
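/*
 * Illustrative sketch, not part of the patch above: the locking pattern the
 * smc_ib_devices and smcd_dev_list conversions share.  The list walkers may
 * now sleep (for example while creating completion queues or terminating
 * link groups), which is not allowed under a spinlock, so both registries
 * move to a mutex.  All "example_" names are hypothetical.
 */
struct example_dev_list {
	struct list_head list;
	struct mutex	 mutex;		/* protects list */
};

static struct example_dev_list example_devs = {
	.list  = LIST_HEAD_INIT(example_devs.list),
	.mutex = __MUTEX_INITIALIZER(example_devs.mutex),
};

static void example_register_dev(struct list_head *entry)
{
	mutex_lock(&example_devs.mutex);	/* may sleep, unlike spin_lock() */
	list_add_tail(entry, &example_devs.list);
	mutex_unlock(&example_devs.mutex);
}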
llc msg */ static bool smc_llc_flow_start(struct smc_llc_flow *flow, struct smc_llc_qentry *qentry) @@ -195,14 +215,7 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow, spin_lock_bh(&lgr->llc_flow_lock); if (flow->type) { /* a flow is already active */ - if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK || - qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) && - !lgr->delayed_event) { - lgr->delayed_event = qentry; - } else { - /* forget this llc request */ - kfree(qentry); - } + smc_llc_flow_parallel(lgr, flow->type, qentry); spin_unlock_bh(&lgr->llc_flow_lock); return false; } @@ -222,8 +235,8 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow, } if (qentry == lgr->delayed_event) lgr->delayed_event = NULL; - spin_unlock_bh(&lgr->llc_flow_lock); smc_llc_flow_qentry_set(flow, qentry); + spin_unlock_bh(&lgr->llc_flow_lock); return true; } @@ -251,11 +264,11 @@ int smc_llc_flow_initiate(struct smc_link_group *lgr, return 0; } spin_unlock_bh(&lgr->llc_flow_lock); - rc = wait_event_interruptible_timeout(lgr->llc_waiter, - (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE && - (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE || - lgr->llc_flow_rmt.type == allowed_remote)), - SMC_LLC_WAIT_TIME); + rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) || + (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE && + (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE || + lgr->llc_flow_rmt.type == allowed_remote))), + SMC_LLC_WAIT_TIME * 10); if (!rc) return -ETIMEDOUT; goto again; @@ -272,7 +285,7 @@ void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow) flow == &lgr->llc_flow_lcl) schedule_work(&lgr->llc_event_work); else - wake_up_interruptible(&lgr->llc_waiter); + wake_up(&lgr->llc_flow_waiter); } /* lnk is optional and used for early wakeup when link goes down, useful in @@ -283,26 +296,32 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr, int time_out, u8 exp_msg) { struct smc_llc_flow *flow = &lgr->llc_flow_lcl; + u8 rcv_msg; - wait_event_interruptible_timeout(lgr->llc_waiter, - (flow->qentry || - (lnk && !smc_link_usable(lnk)) || - list_empty(&lgr->list)), - time_out); + wait_event_timeout(lgr->llc_msg_waiter, + (flow->qentry || + (lnk && !smc_link_usable(lnk)) || + list_empty(&lgr->list)), + time_out); if (!flow->qentry || (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) { smc_llc_flow_qentry_del(flow); goto out; } - if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) { + rcv_msg = flow->qentry->msg.raw.hdr.common.type; + if (exp_msg && rcv_msg != exp_msg) { if (exp_msg == SMC_LLC_ADD_LINK && - flow->qentry->msg.raw.hdr.common.type == - SMC_LLC_DELETE_LINK) { + rcv_msg == SMC_LLC_DELETE_LINK) { /* flow_start will delay the unexpected msg */ smc_llc_flow_start(&lgr->llc_flow_lcl, smc_llc_flow_qentry_clr(flow)); return NULL; } + pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: " + "msg %d exp %d flow %d role %d flags %x\n", + SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg, + flow->type, lgr->role, + flow->qentry->msg.raw.hdr.flags); smc_llc_flow_qentry_del(flow); } out: @@ -409,7 +428,7 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link, rtok_ix = 1; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { link = &send_link->lgr->lnk[i]; - if (link->state == SMC_LNK_ACTIVE && link != send_link) { + if (smc_link_active(link) && link != send_link) { rkeyllc->rtoken[rtok_ix].link_id = link->link_id; rkeyllc->rtoken[rtok_ix].rmb_key = htonl(rmb_desc->mr_rx[link->link_idx]->rkey); @@ -876,6 +895,36 
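/*
 * Illustrative summary, not part of the patch above: the single llc_waiter
 * is split into two wait queues with distinct wake-up events.
 *
 *   lgr->llc_flow_waiter  woken when a flow slot becomes free
 *                         (smc_llc_flow_stop(), link-down handling),
 *                         i.e. "you may try to start your own flow now".
 *   lgr->llc_msg_waiter   woken when the message the currently running
 *                         flow is waiting for has been queued
 *                         (smc_llc_event_handler(), smc_llc_rx_response()).
 *
 * Both are woken on link-group teardown so no waiter can outlive the lgr.
 * The two helpers below are hypothetical and only restate the pairing.
 */
static void example_llc_flow_finished(struct smc_link_group *lgr)
{
	wake_up(&lgr->llc_flow_waiter);		/* a flow slot is free again */
}

static void example_llc_msg_arrived(struct smc_link_group *lgr)
{
	wake_up(&lgr->llc_msg_waiter);		/* feed the flow that is running */
}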
@@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry) return rc; } +/* as an SMC client, invite server to start the add_link processing */ +static void smc_llc_cli_add_link_invite(struct smc_link *link, + struct smc_llc_qentry *qentry) +{ + struct smc_link_group *lgr = smc_get_lgr(link); + struct smc_init_info ini; + + if (lgr->type == SMC_LGR_SYMMETRIC || + lgr->type == SMC_LGR_ASYMMETRIC_PEER) + goto out; + + ini.vlan_id = lgr->vlan_id; + smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev); + if (!ini.ib_dev) + goto out; + + smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1], + ini.ib_gid, NULL, SMC_LLC_REQ); +out: + kfree(qentry); +} + +static bool smc_llc_is_local_add_link(union smc_llc_msg *llc) +{ + if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK && + !llc->add_link.qp_mtu && !llc->add_link.link_num) + return true; + return false; +} + static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) { struct smc_llc_qentry *qentry; @@ -883,7 +932,10 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr) qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl); mutex_lock(&lgr->llc_conf_mutex); - smc_llc_cli_add_link(qentry->link, qentry); + if (smc_llc_is_local_add_link(&qentry->msg)) + smc_llc_cli_add_link_invite(qentry->link, qentry); + else + smc_llc_cli_add_link(qentry->link, qentry); mutex_unlock(&lgr->llc_conf_mutex); } @@ -892,7 +944,7 @@ static int smc_llc_active_link_count(struct smc_link_group *lgr) int i, link_count = 0; for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (!smc_link_usable(&lgr->lnk[i])) + if (!smc_link_active(&lgr->lnk[i])) continue; link_count++; } @@ -1032,12 +1084,14 @@ static int smc_llc_srv_conf_link(struct smc_link *link, if (rc) return -ENOLINK; /* receive CONFIRM LINK response over the RoCE fabric */ - qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, - SMC_LLC_CONFIRM_LINK); - if (!qentry) { + qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0); + if (!qentry || + qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) { /* send DELETE LINK */ smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ, false, SMC_LLC_DEL_LOST_PATH); + if (qentry) + smc_llc_flow_qentry_del(&lgr->llc_flow_lcl); return -ENOLINK; } smc_llc_save_peer_uid(qentry); @@ -1139,14 +1193,14 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr) mutex_unlock(&lgr->llc_conf_mutex); } -/* enqueue a local add_link req to trigger a new add_link flow, only as SERV */ -void smc_llc_srv_add_link_local(struct smc_link *link) +/* enqueue a local add_link req to trigger a new add_link flow */ +void smc_llc_add_link_local(struct smc_link *link) { struct smc_llc_msg_add_link add_llc = {0}; add_llc.hd.length = sizeof(add_llc); add_llc.hd.common.type = SMC_LLC_ADD_LINK; - /* no dev and port needed, we as server ignore client data anyway */ + /* no dev and port needed */ smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc); } @@ -1222,8 +1276,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr) smc_llc_send_message(lnk, &qentry->msg); /* response */ if (smc_link_downing(&lnk_del->state)) { - smc_switch_conns(lgr, lnk_del, false); - smc_wr_tx_wait_no_pending_sends(lnk_del); + if (smc_switch_conns(lgr, lnk_del, false)) + smc_wr_tx_wait_no_pending_sends(lnk_del); } smcr_link_clear(lnk_del, true); @@ -1297,8 +1351,8 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr) goto out; /* asymmetric link already deleted */ if (smc_link_downing(&lnk_del->state)) { - 
smc_switch_conns(lgr, lnk_del, false); - smc_wr_tx_wait_no_pending_sends(lnk_del); + if (smc_switch_conns(lgr, lnk_del, false)) + smc_wr_tx_wait_no_pending_sends(lnk_del); } if (!list_empty(&lgr->list)) { /* qentry is either a request from peer (send it back to @@ -1326,7 +1380,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr) if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) { /* trigger setup of asymm alt link */ - smc_llc_srv_add_link_local(lnk); + smc_llc_add_link_local(lnk); } out: mutex_unlock(&lgr->llc_conf_mutex); @@ -1455,11 +1509,22 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) if (list_empty(&lgr->list)) goto out; /* lgr is terminating */ if (lgr->role == SMC_CLNT) { - if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) { + if (smc_llc_is_local_add_link(llc)) { + if (lgr->llc_flow_lcl.type == + SMC_LLC_FLOW_ADD_LINK) + break; /* add_link in progress */ + if (smc_llc_flow_start(&lgr->llc_flow_lcl, + qentry)) { + schedule_work(&lgr->llc_add_link_work); + } + return; + } + if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && + !lgr->llc_flow_lcl.qentry) { /* a flow is waiting for this message */ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); - wake_up_interruptible(&lgr->llc_waiter); + wake_up(&lgr->llc_msg_waiter); } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { schedule_work(&lgr->llc_add_link_work); @@ -1474,33 +1539,18 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry) if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { /* a flow is waiting for this message */ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); - wake_up_interruptible(&lgr->llc_waiter); + wake_up(&lgr->llc_msg_waiter); return; } break; case SMC_LLC_DELETE_LINK: - if (lgr->role == SMC_CLNT) { - /* server requests to delete this link, send response */ - if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) { - /* DEL LINK REQ during ADD LINK SEQ */ - smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, - qentry); - wake_up_interruptible(&lgr->llc_waiter); - } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, - qentry)) { - schedule_work(&lgr->llc_del_link_work); - } - } else { - if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && - !lgr->llc_flow_lcl.qentry) { - /* DEL LINK REQ during ADD LINK SEQ */ - smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, - qentry); - wake_up_interruptible(&lgr->llc_waiter); - } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, - qentry)) { - schedule_work(&lgr->llc_del_link_work); - } + if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK && + !lgr->llc_flow_lcl.qentry) { + /* DEL LINK REQ during ADD LINK SEQ */ + smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry); + wake_up(&lgr->llc_msg_waiter); + } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) { + schedule_work(&lgr->llc_del_link_work); } return; case SMC_LLC_CONFIRM_RKEY: @@ -1566,23 +1616,30 @@ static void smc_llc_event_work(struct work_struct *work) static void smc_llc_rx_response(struct smc_link *link, struct smc_llc_qentry *qentry) { + enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type; + struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl; u8 llc_type = qentry->msg.raw.hdr.common.type; switch (llc_type) { case SMC_LLC_TEST_LINK: - if (link->state == SMC_LNK_ACTIVE) + if (smc_link_active(link)) complete(&link->llc_testlink_resp); break; case SMC_LLC_ADD_LINK: - case SMC_LLC_DELETE_LINK: - case SMC_LLC_CONFIRM_LINK: case SMC_LLC_ADD_LINK_CONT: + case SMC_LLC_CONFIRM_LINK: + if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry) 
+ break; /* drop out-of-flow response */ + goto assign; + case SMC_LLC_DELETE_LINK: + if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry) + break; /* drop out-of-flow response */ + goto assign; case SMC_LLC_CONFIRM_RKEY: case SMC_LLC_DELETE_RKEY: - /* assign responses to the local flow, we requested them */ - smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); - wake_up_interruptible(&link->lgr->llc_waiter); - return; + if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry) + break; /* drop out-of-flow response */ + goto assign; case SMC_LLC_CONFIRM_RKEY_CONT: /* not used because max links is 3 */ break; @@ -1591,6 +1648,11 @@ static void smc_llc_rx_response(struct smc_link *link, break; } kfree(qentry); + return; +assign: + /* assign responses to the local flow, we requested them */ + smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry); + wake_up(&link->lgr->llc_msg_waiter); } static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc) @@ -1616,7 +1678,7 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc) spin_lock_irqsave(&lgr->llc_event_q_lock, flags); list_add_tail(&qentry->list, &lgr->llc_event_q); spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags); - schedule_work(&link->lgr->llc_event_work); + schedule_work(&lgr->llc_event_work); } /* copy received msg and add it to the event queue */ @@ -1644,7 +1706,7 @@ static void smc_llc_testlink_work(struct work_struct *work) u8 user_data[16] = { 0 }; int rc; - if (link->state != SMC_LNK_ACTIVE) + if (!smc_link_active(link)) return; /* don't reschedule worker */ expire_time = link->wr_rx_tstamp + link->llc_testlink_time; if (time_is_after_jiffies(expire_time)) { @@ -1656,7 +1718,7 @@ static void smc_llc_testlink_work(struct work_struct *work) /* receive TEST LINK response over RoCE fabric */ rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp, SMC_LLC_WAIT_TIME); - if (link->state != SMC_LNK_ACTIVE) + if (!smc_link_active(link)) return; /* link state changed */ if (rc <= 0) { smcr_link_down_cond_sched(link); @@ -1677,7 +1739,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) INIT_LIST_HEAD(&lgr->llc_event_q); spin_lock_init(&lgr->llc_event_q_lock); spin_lock_init(&lgr->llc_flow_lock); - init_waitqueue_head(&lgr->llc_waiter); + init_waitqueue_head(&lgr->llc_flow_waiter); + init_waitqueue_head(&lgr->llc_msg_waiter); mutex_init(&lgr->llc_conf_mutex); lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time; } @@ -1686,7 +1749,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) void smc_llc_lgr_clear(struct smc_link_group *lgr) { smc_llc_event_flush(lgr); - wake_up_interruptible_all(&lgr->llc_waiter); + wake_up_all(&lgr->llc_flow_waiter); + wake_up_all(&lgr->llc_msg_waiter); cancel_work_sync(&lgr->llc_event_work); cancel_work_sync(&lgr->llc_add_link_work); cancel_work_sync(&lgr->llc_del_link_work); diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h index a5d2fe3eea61ff475294583e609a03e5f94716cb..cc00a2ec4e92e8060bce1926169832f7f7d96eb2 100644 --- a/net/smc/smc_llc.h +++ b/net/smc/smc_llc.h @@ -103,7 +103,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn); int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry); int smc_llc_srv_add_link(struct smc_link *link); -void smc_llc_srv_add_link_local(struct smc_link *link); +void smc_llc_add_link_local(struct smc_link *link); int smc_llc_init(void) __init; #endif /* SMC_LLC_H */ diff --git a/net/smc/smc_pnet.c 
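/*
 * Illustrative sketch, not part of the patch above: the filter added to
 * smc_llc_rx_response() amounts to this mapping.  A response is only handed
 * to the local flow when a flow of the matching type is running and has no
 * message pending; everything else is dropped as an out-of-flow response.
 * The helper name is hypothetical.
 */
static enum smc_llc_flowtype example_expected_flow(u8 llc_type)
{
	switch (llc_type) {
	case SMC_LLC_ADD_LINK:
	case SMC_LLC_ADD_LINK_CONT:
	case SMC_LLC_CONFIRM_LINK:
		return SMC_LLC_FLOW_ADD_LINK;
	case SMC_LLC_DELETE_LINK:
		return SMC_LLC_FLOW_DEL_LINK;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		return SMC_LLC_FLOW_RKEY;
	default:
		return SMC_LLC_FLOW_NONE;
	}
}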
b/net/smc/smc_pnet.c index 014d91b9778ec4ba5f701080b7ca64b3bf236c60..30e5fac7034e1b9ea1d49c8a1251db59ff4ff87c 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -129,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) return rc; /* remove ib devices */ - spin_lock(&smc_ib_devices.lock); + mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) { if (ibdev->pnetid_by_user[ibport] && @@ -149,9 +150,9 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) } } } - spin_unlock(&smc_ib_devices.lock); + mutex_unlock(&smc_ib_devices.mutex); /* remove smcd devices */ - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) { if (smcd_dev->pnetid_by_user && (!pnet_name || @@ -165,7 +166,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) rc = 0; } } - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); return rc; } @@ -240,14 +241,14 @@ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port, u8 pnet_null[SMC_MAX_PNETID_LEN] = {0}; bool applied = false; - spin_lock(&smc_ib_devices.lock); + mutex_lock(&smc_ib_devices.mutex); if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) { memcpy(ib_dev->pnetid[ib_port - 1], pnet_name, SMC_MAX_PNETID_LEN); ib_dev->pnetid_by_user[ib_port - 1] = true; applied = true; } - spin_unlock(&smc_ib_devices.lock); + mutex_unlock(&smc_ib_devices.mutex); return applied; } @@ -258,13 +259,13 @@ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name) u8 pnet_null[SMC_MAX_PNETID_LEN] = {0}; bool applied = false; - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) { memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN); smcd_dev->pnetid_by_user = true; applied = true; } - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); return applied; } @@ -300,7 +301,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) { struct smc_ib_device *ibdev; - spin_lock(&smc_ib_devices.lock); + mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (!strncmp(ibdev->ibdev->name, ib_name, sizeof(ibdev->ibdev->name)) || @@ -311,7 +312,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) } ibdev = NULL; out: - spin_unlock(&smc_ib_devices.lock); + mutex_unlock(&smc_ib_devices.mutex); return ibdev; } @@ -320,7 +321,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name) { struct smcd_dev *smcd_dev; - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) { if (!strncmp(dev_name(&smcd_dev->dev), smcd_name, IB_DEVICE_NAME_MAX - 1)) @@ -328,7 +329,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name) } smcd_dev = NULL; out: - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); return smcd_dev; } @@ -825,7 +826,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, int i; ini->ib_dev = NULL; - spin_lock(&smc_ib_devices.lock); + mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (ibdev == known_dev) continue; @@ -844,7 +845,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, } } out: - spin_unlock(&smc_ib_devices.lock); + 
mutex_unlock(&smc_ib_devices.mutex); } /* find alternate roce device with same pnet_id and vlan_id */ @@ -863,7 +864,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev, { struct smc_ib_device *ibdev; - spin_lock(&smc_ib_devices.lock); + mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { struct net_device *ndev; int i; @@ -888,7 +889,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev, } } } - spin_unlock(&smc_ib_devices.lock); + mutex_unlock(&smc_ib_devices.mutex); } /* Determine the corresponding IB device port based on the hardware PNETID. @@ -924,7 +925,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) return; /* pnetid could not be determined */ - spin_lock(&smcd_dev_list.lock); + mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(ismdev, &smcd_dev_list.list, list) { if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) && !ismdev->going_away) { @@ -932,7 +933,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, break; } } - spin_unlock(&smcd_dev_list.lock); + mutex_unlock(&smcd_dev_list.mutex); } /* PNET table analysis for a given sock: diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 7239ba9b99dc6eb9b0f437064ce1a64c88139d86..1e23cdd41eb1ec7740624e1269abe630c964380c 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -169,6 +169,8 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context) static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) { *idx = link->wr_tx_cnt; + if (!smc_link_usable(link)) + return -ENOLINK; for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) { if (!test_and_set_bit(*idx, link->wr_tx_mask)) return 0; @@ -560,15 +562,15 @@ void smc_wr_free_link(struct smc_link *lnk) { struct ib_device *ibdev; + if (!lnk->smcibdev) + return; + ibdev = lnk->smcibdev->ibdev; + if (smc_wr_tx_wait_no_pending_sends(lnk)) memset(lnk->wr_tx_mask, 0, BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); - if (!lnk->smcibdev) - return; - ibdev = lnk->smcibdev->ibdev; - if (lnk->wr_rx_dma_addr) { ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 935bbef2f7bef1388f33697e05d465296131511e..453bacc99907837c12df21d67e611c30ed5a188a 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -71,7 +71,7 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs) size = RPCRDMA_HDRLEN_MIN; /* Maximum Read list size */ - size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); + size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32); /* Minimal Read chunk size */ size += sizeof(__be32); /* segment count */ @@ -94,7 +94,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs) size = RPCRDMA_HDRLEN_MIN; /* Maximum Write list size */ - size = sizeof(__be32); /* segment count */ + size += sizeof(__be32); /* segment count */ size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32); size += sizeof(__be32); /* list discriminator */ diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 14165b673b2068685e9e98a8c232d831dd33aeb0..053c8ab1265a7088b984a8a4ac62b0a1a17e029e 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -249,6 +249,11 @@ xprt_rdma_connect_worker(struct work_struct *work) xprt->stat.connect_start; xprt_set_connected(xprt); rc = -EAGAIN; + 
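/*
 * Illustrative sketch, not part of the patch above: what the two "=" to "+="
 * fixes in rpc_rdma.c restore.  Assigning instead of accumulating threw away
 * the RPCRDMA_HDRLEN_MIN base, so the worst-case header size was
 * under-estimated.  The helper below is a hypothetical, simplified
 * restatement of the call-header case:
 *
 *   size  = RPCRDMA_HDRLEN_MIN
 *   size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32)
 *   size += sizeof(__be32)          (Read list segment count)
 */
static unsigned int example_max_call_hdr(unsigned int maxsegs)
{
	unsigned int size = RPCRDMA_HDRLEN_MIN;

	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
	size += sizeof(__be32);		/* segment count */
	return size;
}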
} else { + /* Force a call to xprt_rdma_close to clean up */ + spin_lock(&xprt->transport_lock); + set_bit(XPRT_CLOSE_WAIT, &xprt->state); + spin_unlock(&xprt->transport_lock); } xprt_wake_pending_tasks(xprt, rc); } diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 2198c8ec8dff62e7c3ce620fe3c22418c5d12e3c..75c646743df3ef7ccc1f348eb8914a5f4e330427 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -281,17 +281,19 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) break; case RDMA_CM_EVENT_CONNECT_ERROR: ep->re_connect_status = -ENOTCONN; - goto disconnected; + goto wake_connect_worker; case RDMA_CM_EVENT_UNREACHABLE: ep->re_connect_status = -ENETUNREACH; - goto disconnected; + goto wake_connect_worker; case RDMA_CM_EVENT_REJECTED: dprintk("rpcrdma: connection to %pISpc rejected: %s\n", sap, rdma_reject_msg(id, event->status)); ep->re_connect_status = -ECONNREFUSED; if (event->status == IB_CM_REJ_STALE_CONN) - ep->re_connect_status = -EAGAIN; - goto disconnected; + ep->re_connect_status = -ENOTCONN; +wake_connect_worker: + wake_up_all(&ep->re_connect_wait); + return 0; case RDMA_CM_EVENT_DISCONNECTED: ep->re_connect_status = -ECONNABORTED; disconnected: @@ -400,14 +402,14 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) ep = kzalloc(sizeof(*ep), GFP_NOFS); if (!ep) - return -EAGAIN; + return -ENOTCONN; ep->re_xprt = &r_xprt->rx_xprt; kref_init(&ep->re_kref); id = rpcrdma_create_id(r_xprt, ep); if (IS_ERR(id)) { - rc = PTR_ERR(id); - goto out_free; + kfree(ep); + return PTR_ERR(id); } __module_get(THIS_MODULE); device = id->device; @@ -506,9 +508,6 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) out_destroy: rpcrdma_ep_put(ep); rdma_destroy_id(id); -out_free: - kfree(ep); - r_xprt->rx_ep = NULL; return rc; } @@ -524,8 +523,6 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt) struct rpcrdma_ep *ep; int rc; -retry: - rpcrdma_xprt_disconnect(r_xprt); rc = rpcrdma_ep_create(r_xprt); if (rc) return rc; @@ -540,10 +537,6 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt) rpcrdma_ep_get(ep); rpcrdma_post_recvs(r_xprt, true); - rc = rpcrdma_sendctxs_create(r_xprt); - if (rc) - goto out; - rc = rdma_connect(ep->re_id, &ep->re_remote_cma); if (rc) goto out; @@ -553,15 +546,19 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt) wait_event_interruptible(ep->re_connect_wait, ep->re_connect_status != 0); if (ep->re_connect_status <= 0) { - if (ep->re_connect_status == -EAGAIN) - goto retry; rc = ep->re_connect_status; goto out; } + rc = rpcrdma_sendctxs_create(r_xprt); + if (rc) { + rc = -ENOTCONN; + goto out; + } + rc = rpcrdma_reqs_setup(r_xprt); if (rc) { - rpcrdma_xprt_disconnect(r_xprt); + rc = -ENOTCONN; goto out; } rpcrdma_mrs_create(r_xprt); diff --git a/net/tipc/link.c b/net/tipc/link.c index ee3b8d0576b8939b42b5b40f10524d42735884c2..d40f8e5b7683531f42e12950ac87ca33558e958e 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -827,11 +827,11 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) state |= l->bc_rcvlink->rcv_unacked; state |= l->rcv_unacked; state |= !skb_queue_empty(&l->transmq); - state |= !skb_queue_empty(&l->deferdq); probe = mstate->probing; probe |= l->silent_intv_cnt; if (probe || mstate->monitoring) l->silent_intv_cnt++; + probe |= !skb_queue_empty(&l->deferdq); if (l->snd_nxt == l->checkpoint) { tipc_link_update_cwin(l, 0, 0); probe = true; @@ -921,6 +921,21 @@ static void link_prepare_wakeup(struct tipc_link *l) } +/** + * 
tipc_link_set_skb_retransmit_time - set the time at which retransmission of + * the given skb should be next attempted + * @skb: skb to set a future retransmission time for + * @l: link the skb will be transmitted on + */ +static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb, + struct tipc_link *l) +{ + if (link_is_bc_sndlink(l)) + TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; + else + TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; +} + void tipc_link_reset(struct tipc_link *l) { struct sk_buff_head list; @@ -1036,9 +1051,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, return -ENOBUFS; } __skb_queue_tail(transmq, skb); - /* next retransmit attempt */ - if (link_is_bc_sndlink(l)) - TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; + tipc_link_set_skb_retransmit_time(skb, l); __skb_queue_tail(xmitq, _skb); TIPC_SKB_CB(skb)->ackers = l->ackers; l->rcv_unacked = 0; @@ -1139,9 +1152,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l, if (unlikely(skb == l->backlog[imp].target_bskb)) l->backlog[imp].target_bskb = NULL; __skb_queue_tail(&l->transmq, skb); - /* next retransmit attempt */ - if (link_is_bc_sndlink(l)) - TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; + tipc_link_set_skb_retransmit_time(skb, l); __skb_queue_tail(xmitq, _skb); TIPC_SKB_CB(skb)->ackers = l->ackers; @@ -1584,8 +1595,7 @@ static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r, /* retransmit skb if unrestricted*/ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) continue; - TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ? - TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM; + tipc_link_set_skb_retransmit_time(skb, l); _skb = pskb_copy(skb, GFP_ATOMIC); if (!_skb) continue; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index dfbaf6bd8b1c759f41028c583efc590fba52c261..2700a63ab095e51b45af185d97f9919b62fe0072 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -22,7 +22,7 @@ #include static struct workqueue_struct *virtio_vsock_workqueue; -static struct virtio_vsock *the_virtio_vsock; +static struct virtio_vsock __rcu *the_virtio_vsock; static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ struct virtio_vsock { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 263ae395ad443fb2553a6f9af9df11363090f7da..7fbca0854265a2aac1497b217db4d0be18e8812f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5016,7 +5016,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) err = nl80211_parse_he_obss_pd( info->attrs[NL80211_ATTR_HE_OBSS_PD], ¶ms.he_obss_pd); - goto out; + if (err) + goto out; } if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) { @@ -5024,7 +5025,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) info->attrs[NL80211_ATTR_HE_BSS_COLOR], ¶ms.he_bss_color); if (err) - return err; + goto out; } nl80211_calculate_ap_params(¶ms); @@ -13265,13 +13266,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev_running(wdev)) return -ENETDOWN; } - - if (!vcmd->doit) - return -EOPNOTSUPP; } else { wdev = NULL; } + if (!vcmd->doit) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 540ed75e44821cc62e1ee927f9ae3c196487c862..08b80669f6495591fe4e9255ca0955f8ef83f7de 100644 --- 
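/*
 * Illustrative sketch, not part of the patch above: what the __rcu
 * annotation on the_virtio_vsock is for.  It lets sparse flag any access
 * that bypasses the RCU accessors; the intended usage pattern looks like
 * this (all "example_" names are hypothetical):
 */
static struct virtio_vsock __rcu *example_vsock;

static void example_reader(void)
{
	struct virtio_vsock *vsock;

	rcu_read_lock();
	vsock = rcu_dereference(example_vsock);
	/* vsock, if non-NULL, stays valid until rcu_read_unlock() */
	rcu_read_unlock();
}

static void example_publish(struct virtio_vsock *vsock)
{
	rcu_assign_pointer(example_vsock, vsock);
}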
a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -2,9 +2,6 @@ #include #include -#include -#include -#include #include "xsk_queue.h" @@ -55,7 +52,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks, pool->free_heads_cnt = chunks; pool->headroom = headroom; pool->chunk_size = chunk_size; - pool->cheap_dma = true; pool->unaligned = unaligned; pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM; INIT_LIST_HEAD(&pool->free_list); @@ -125,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool) } } -static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool) -{ -#if defined(CONFIG_SWIOTLB) - phys_addr_t paddr; - u32 i; - - for (i = 0; i < pool->dma_pages_cnt; i++) { - paddr = dma_to_phys(pool->dev, pool->dma_pages[i]); - if (is_swiotlb_buffer(paddr)) - return false; - } -#endif - return true; -} - -static bool xp_check_cheap_dma(struct xsk_buff_pool *pool) -{ -#if defined(CONFIG_HAS_DMA) - const struct dma_map_ops *ops = get_dma_ops(pool->dev); - - if (ops) { - return !ops->sync_single_for_cpu && - !ops->sync_single_for_device; - } - - if (!dma_is_direct(ops)) - return false; - - if (!xp_check_swiotlb_dma(pool)) - return false; - - if (!dev_is_dma_coherent(pool->dev)) { -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) - return false; -#endif - } -#endif - return true; -} - int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, unsigned long attrs, struct page **pages, u32 nr_pages) { @@ -180,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, pool->dev = dev; pool->dma_pages_cnt = nr_pages; + pool->dma_need_sync = false; for (i = 0; i < pool->dma_pages_cnt; i++) { dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE, @@ -188,14 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, xp_dma_unmap(pool, attrs); return -ENOMEM; } + if (dma_need_sync(dev, dma)) + pool->dma_need_sync = true; pool->dma_pages[i] = dma; } if (pool->unaligned) xp_check_dma_contiguity(pool); - - pool->dev = dev; - pool->cheap_dma = xp_check_cheap_dma(pool); return 0; } EXPORT_SYMBOL(xp_dma_map); @@ -280,7 +234,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool) xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM; xskb->xdp.data_meta = xskb->xdp.data; - if (!pool->cheap_dma) { + if (pool->dma_need_sync) { dma_sync_single_range_for_device(pool->dev, xskb->dma, 0, pool->frame_len, DMA_BIDIRECTIONAL); diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 100e29682b481d3080254a762d5aade9256c8aa2..827ccdf2db57f6b567a079c5ade39279061d84e4 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -15,6 +15,7 @@ static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb, { if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, skb->truesize)) { + XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } @@ -49,23 +50,51 @@ static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb) struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx, strp); struct strp_msg *rxm = strp_msg(skb); + int len = rxm->full_len - 2; u32 nonesp_marker; int err; + /* keepalive packet? 
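/*
 * Illustrative sketch, not part of the patch above: the xsk_buff_pool
 * changes drop the open-coded swiotlb/dma_map_ops probing and simply ask
 * the DMA core per mapping via dma_need_sync().  A simplified, hypothetical
 * version of the mapping loop:
 */
static int example_dma_map(struct xsk_buff_pool *pool, struct device *dev,
			   struct page **pages, u32 nr_pages)
{
	u32 i;

	pool->dma_need_sync = false;
	for (i = 0; i < nr_pages; i++) {
		dma_addr_t dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
						    DMA_BIDIRECTIONAL, 0);

		if (dma_mapping_error(dev, dma))
			return -ENOMEM;		/* real code unmaps what it mapped */
		if (dma_need_sync(dev, dma))
			pool->dma_need_sync = true;
		pool->dma_pages[i] = dma;
	}
	return 0;
}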
*/ + if (unlikely(len == 1)) { + u8 data; + + err = skb_copy_bits(skb, rxm->offset + 2, &data, 1); + if (err < 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); + kfree_skb(skb); + return; + } + + if (data == 0xff) { + kfree_skb(skb); + return; + } + } + + /* drop other short messages */ + if (unlikely(len <= sizeof(nonesp_marker))) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); + kfree_skb(skb); + return; + } + err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker, sizeof(nonesp_marker)); if (err < 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR); kfree_skb(skb); return; } /* remove header, leave non-ESP marker/SPI */ if (!__pskb_pull(skb, rxm->offset + 2)) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } if (pskb_trim(skb, rxm->full_len - 2) != 0) { + XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR); kfree_skb(skb); return; } @@ -91,7 +120,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb) return err; len = be16_to_cpu(blen); - if (len < 6) + if (len < 2) return -EINVAL; return len; @@ -109,8 +138,11 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, flags |= nonblock ? MSG_DONTWAIT : 0; skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err); - if (!skb) + if (!skb) { + if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) + return 0; return err; + } copied = len; if (copied > skb->len) @@ -213,7 +245,7 @@ static int espintcp_sendskmsg_locked(struct sock *sk, return 0; } -static int espintcp_push_msgs(struct sock *sk) +static int espintcp_push_msgs(struct sock *sk, int flags) { struct espintcp_ctx *ctx = espintcp_getctx(sk); struct espintcp_msg *emsg = &ctx->partial; @@ -227,12 +259,12 @@ static int espintcp_push_msgs(struct sock *sk) ctx->tx_running = 1; if (emsg->skb) - err = espintcp_sendskb_locked(sk, emsg, 0); + err = espintcp_sendskb_locked(sk, emsg, flags); else - err = espintcp_sendskmsg_locked(sk, emsg, 0); + err = espintcp_sendskmsg_locked(sk, emsg, flags); if (err == -EAGAIN) { ctx->tx_running = 0; - return 0; + return flags & MSG_DONTWAIT ? 
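/*
 * Illustrative sketch, not part of the patch above: the TCP encapsulation
 * framing (RFC 8229) that the espintcp receive path now handles.  Every
 * record starts with a 16-bit big-endian length that includes itself; a
 * one-byte 0xff payload is a NAT keepalive and is silently discarded, and
 * anything too short to carry the 32-bit non-ESP marker/SPI is dropped with
 * an XFRM error counter bump.  The classifier below is hypothetical.
 */
enum example_frame_kind {
	EXAMPLE_FRAME_KEEPALIVE,	/* drop quietly */
	EXAMPLE_FRAME_TOO_SHORT,	/* drop and count as header error */
	EXAMPLE_FRAME_PAYLOAD,		/* ESP or IKE, keep processing */
};

static enum example_frame_kind example_classify(u16 blen, u8 first_byte)
{
	int len = blen - 2;		/* strip the length field itself */

	if (len == 1 && first_byte == 0xff)
		return EXAMPLE_FRAME_KEEPALIVE;
	if (len <= (int)sizeof(u32))	/* no room for marker/SPI */
		return EXAMPLE_FRAME_TOO_SHORT;
	return EXAMPLE_FRAME_PAYLOAD;
}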
-EAGAIN : 0; } if (!err) memset(emsg, 0, sizeof(*emsg)); @@ -257,7 +289,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb) offset = skb_transport_offset(skb); len = skb->len - offset; - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); if (emsg->len) { kfree_skb(skb); @@ -270,7 +302,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb) emsg->len = len; emsg->skb = skb; - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); return 0; } @@ -287,7 +319,7 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) char buf[2] = {0}; int err, end; - if (msg->msg_flags) + if (msg->msg_flags & ~MSG_DONTWAIT) return -EOPNOTSUPP; if (size > MAX_ESPINTCP_MSG) @@ -298,9 +330,10 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) lock_sock(sk); - err = espintcp_push_msgs(sk); + err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT); if (err < 0) { - err = -ENOBUFS; + if (err != -EAGAIN || !(msg->msg_flags & MSG_DONTWAIT)) + err = -ENOBUFS; goto unlock; } @@ -337,10 +370,9 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) tcp_rate_check_app_limited(sk); - err = espintcp_push_msgs(sk); + err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT); /* this message could be partially sent, keep it */ - if (err < 0) - goto unlock; + release_sock(sk); return size; @@ -374,7 +406,7 @@ static void espintcp_tx_work(struct work_struct *work) lock_sock(sk); if (!ctx->tx_running) - espintcp_push_msgs(sk); + espintcp_push_msgs(sk, 0); release_sock(sk); } diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index c407ecbc5d462b952aa60cf41c98e8a5c7e39ddf..b615729812e5acd1c1be39f402db5a9a6061e25d 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -581,6 +582,7 @@ static const struct net_device_ops xfrmi_netdev_ops = { static void xfrmi_dev_setup(struct net_device *dev) { dev->netdev_ops = &xfrmi_netdev_ops; + dev->header_ops = &ip_tunnel_header_ops; dev->type = ARPHRD_NONE; dev->mtu = ETH_DATA_LEN; dev->min_mtu = ETH_MIN_MTU; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 564aa6492e7c397312f51e350a84b2063c5ed616..19c5e0fa3f44d89fa7767bbaa6be0a579dd5f26e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -39,7 +39,7 @@ #ifdef CONFIG_XFRM_STATISTICS #include #endif -#ifdef CONFIG_INET_ESPINTCP +#ifdef CONFIG_XFRM_ESPINTCP #include #endif @@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, spin_unlock_bh(&pq->hold_queue.lock); } -static bool xfrm_policy_mark_match(struct xfrm_policy *policy, - struct xfrm_policy *pol) +static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark, + struct xfrm_policy *pol) { - if (policy->mark.v == pol->mark.v && - policy->priority == pol->priority) - return true; - - return false; + return mark->v == pol->mark.v && mark->m == pol->mark.m; } static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed) @@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain, if (pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { delpol = pol; @@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain, if 
(pol->type == policy->type && pol->if_id == policy->if_id && !selector_cmp(&pol->selector, &policy->selector) && - xfrm_policy_mark_match(policy, pol) && + xfrm_policy_mark_match(&policy->mark, pol) && xfrm_sec_ctx_match(pol->security, policy->security) && !WARN_ON(delpol)) { if (excl) @@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) EXPORT_SYMBOL(xfrm_policy_insert); static struct xfrm_policy * -__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, +__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark, + u32 if_id, u8 type, int dir, struct xfrm_selector *sel, struct xfrm_sec_ctx *ctx) { struct xfrm_policy *pol; @@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, hlist_for_each_entry(pol, chain, bydst) { if (pol->type == type && pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v && + xfrm_policy_mark_match(mark, pol) && !selector_cmp(sel, &pol->selector) && xfrm_sec_ctx_match(ctx, pol->security)) return pol; @@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id, return NULL; } -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, - struct xfrm_selector *sel, - struct xfrm_sec_ctx *ctx, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, struct xfrm_selector *sel, + struct xfrm_sec_ctx *ctx, int delete, int *err) { struct xfrm_pol_inexact_bin *bin = NULL; struct xfrm_policy *pol, *ret = NULL; @@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id, } EXPORT_SYMBOL(xfrm_policy_bysel_ctx); -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, - u8 type, int dir, u32 id, int delete, - int *err) +struct xfrm_policy * +xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id, + u8 type, int dir, u32 id, int delete, int *err) { struct xfrm_policy *pol, *ret; struct hlist_head *chain; @@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, ret = NULL; hlist_for_each_entry(pol, chain, byidx) { if (pol->type == type && pol->index == id && - pol->if_id == if_id && - (mark & pol->mark.m) == pol->mark.v) { + pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) { xfrm_pol_hold(pol); if (delete) { *err = security_xfrm_policy_delete( @@ -4156,7 +4149,7 @@ void __init xfrm_init(void) seqcount_init(&xfrm_policy_hash_generation); xfrm_input_init(); -#ifdef CONFIG_INET_ESPINTCP +#ifdef CONFIG_XFRM_ESPINTCP espintcp_init(); #endif diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e6cfaa680ef3d9cd0efb7b98ee41f0caccdbdce0..fbb7d9d064787884ac1c272cca2c2e2d58735a9d 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct km_event c; int delete; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; p = nlmsg_data(nlh); @@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, + p->index, delete, &err); else { struct 
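/*
 * Illustrative sketch, not part of the patch above: why the policy lookup
 * now compares the mark mask as well.  Under the old test,
 * "(mark & pol->mark.m) == pol->mark.v", a delete or get aimed at the first
 * policy below could just as easily hit the second, since a mark of 0x1
 * satisfies both.  Matching (v, m) as a pair keeps them distinct for
 * management operations.  The values are made up for illustration.
 */
static const struct xfrm_mark example_mark_a = { .v = 0x1, .m = 0xff };
static const struct xfrm_mark example_mark_b = { .v = 0x1, .m = 0x0f };

static bool example_mark_match(const struct xfrm_mark *mark,
			       const struct xfrm_mark *pol_mark)
{
	return mark->v == pol_mark->v && mark->m == pol_mark->m;
}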
nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel, - ctx, delete, &err); + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, + &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) @@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; struct xfrm_mark m; - u32 mark = xfrm_mark_get(attrs, &m); u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs); @@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + xfrm_mark_get(attrs, &m); + if (p->index) - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err); + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, + 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; @@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; } - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 4aea7cf71d11f5660563b1252d0bd2e492221283..62c275685b75e487a8646d8f7554122d1171009b 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -35,6 +35,7 @@ KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation) # The following turn off the warnings enabled by -Wextra KBUILD_CFLAGS += -Wno-missing-field-initializers KBUILD_CFLAGS += -Wno-sign-compare +KBUILD_CFLAGS += -Wno-type-limits KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1 @@ -66,6 +67,7 @@ KBUILD_CFLAGS += -Wshadow KBUILD_CFLAGS += $(call cc-option, -Wlogical-op) KBUILD_CFLAGS += -Wmissing-field-initializers KBUILD_CFLAGS += -Wsign-compare +KBUILD_CFLAGS += -Wtype-limits KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized) KBUILD_CFLAGS += $(call cc-option, -Wunused-macros) diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 3651cbf6ad49d39e65163e6849a65a68b747c4c8..f54b6ac37ac2ecd946708ef21904dbd01109b75d 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost @@ -124,9 +124,6 @@ existing-targets := $(wildcard $(sort $(targets))) -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) -PHONY += FORCE -FORCE: - endif .PHONY: $(PHONY) diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh index 66a6d511b5247111b84aa6d1eb5c811fc34be9f9..0869def435ee78f999e90ccdc5d2335685e74144 100755 --- a/scripts/decode_stacktrace.sh +++ b/scripts/decode_stacktrace.sh @@ -87,8 +87,8 @@ parse_symbol() { return fi - # Strip out the base of the path - code=${code#$basepath/} + # Strip out the base of the path on each line + code=$(while read -r line; do echo "${line#$basepath/}"; done <<< "$code") # In the case of inlines, move everything to same line code=${code//$'\n'/' '} diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py index be984aa29b759cb1e42bb2c8d6e42eb4b061fb9c..1be9763cf8bb224ee3b754ef2db9250e97cb19eb 100644 --- a/scripts/gdb/linux/symbols.py +++ b/scripts/gdb/linux/symbols.py @@ -96,7 +96,7 @@ lx-symbols command.""" return "" attrs = sect_attrs['attrs'] section_name_to_address = { 
- attrs[n]['name'].string(): attrs[n]['address'] + attrs[n]['battr']['attr']['name'].string(): attrs[n]['address'] for n in range(int(sect_attrs['nsections']))} args = [] for section_name in [".data", ".data..read_mostly", ".rodata", ".bss", diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore index 12a67fdab541dc90f5a2f721d7b811c8e6c9f2b7..c3d537cd027541ec3d5e079e47d56afebe969931 100644 --- a/scripts/kconfig/.gitignore +++ b/scripts/kconfig/.gitignore @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -*.moc +/qconf-moc.cc *conf-cfg # diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 426881ea954f147b66e2da581e58fe57de7e70f7..52b59bf9efe41314cf2060ab2111e88c3447a61d 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -181,19 +181,22 @@ $(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/mconf-cfg # qconf: Used for the xconfig target based on Qt hostprogs += qconf -qconf-cxxobjs := qconf.o +qconf-cxxobjs := qconf.o qconf-moc.o qconf-objs := images.o $(common-objs) HOSTLDLIBS_qconf = $(shell . $(obj)/qconf-cfg && echo $$libs) HOSTCXXFLAGS_qconf.o = $(shell . $(obj)/qconf-cfg && echo $$cflags) +HOSTCXXFLAGS_qconf-moc.o = $(shell . $(obj)/qconf-cfg && echo $$cflags) -$(obj)/qconf.o: $(obj)/qconf-cfg $(obj)/qconf.moc +$(obj)/qconf.o: $(obj)/qconf-cfg quiet_cmd_moc = MOC $@ - cmd_moc = $(shell . $(obj)/qconf-cfg && echo $$moc) -i $< -o $@ + cmd_moc = $(shell . $(obj)/qconf-cfg && echo $$moc) $< -o $@ -$(obj)/%.moc: $(src)/%.h $(obj)/qconf-cfg - $(call cmd,moc) +$(obj)/qconf-moc.cc: $(src)/qconf.h $(obj)/qconf-cfg FORCE + $(call if_changed,moc) + +targets += qconf-moc.cc # gconf: Used for the gconfig target based on GTK+ hostprogs += gconf diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 4a616128a15484c6f9359cdd9ba35901dfebb41a..23d1cb01a41aef4c0675c2b46b6cda4a4015a129 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -23,7 +23,6 @@ #include "lkc.h" #include "qconf.h" -#include "qconf.moc" #include "images.h" @@ -308,10 +307,7 @@ ConfigList::ConfigList(ConfigView* p, const char *name) setVerticalScrollMode(ScrollPerPixel); setHorizontalScrollMode(ScrollPerPixel); - if (mode == symbolMode) - setHeaderLabels(QStringList() << "Item" << "Name" << "N" << "M" << "Y" << "Value"); - else - setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value"); + setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value"); connect(this, SIGNAL(itemSelectionChanged(void)), SLOT(updateSelection(void))); @@ -392,11 +388,6 @@ void ConfigList::updateSelection(void) struct menu *menu; enum prop_type type; - if (mode == symbolMode) - setHeaderLabels(QStringList() << "Item" << "Name" << "N" << "M" << "Y" << "Value"); - else - setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value"); - if (selectedItems().count() == 0) return; @@ -437,14 +428,13 @@ void ConfigList::updateList(ConfigItem* item) if (rootEntry != &rootmenu && (mode == singleMode || (mode == symbolMode && rootEntry->parent != &rootmenu))) { item = (ConfigItem *)topLevelItem(0); - if (!item && mode != symbolMode) { + if (!item) item = new ConfigItem(this, 0, true); - last = item; - } + last = item; } if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) && rootEntry->sym && rootEntry->prompt) { - item = last ? last->nextSibling() : firstChild(); + item = last ? 
last->nextSibling() : nullptr; if (!item) item = new ConfigItem(this, last, rootEntry, true); else @@ -1239,7 +1229,7 @@ void ConfigInfoView::clicked(const QUrl &url) if (count < 1) { qInfo() << "Clicked link is empty"; - delete data; + delete[] data; return; } @@ -1252,7 +1242,7 @@ void ConfigInfoView::clicked(const QUrl &url) result = sym_re_search(data); if (!result) { qInfo() << "Clicked symbol is invalid:" << data; - delete data; + delete[] data; return; } @@ -1735,7 +1725,6 @@ void ConfigMainWindow::listFocusChanged(void) void ConfigMainWindow::goBack(void) { -qInfo() << __FUNCTION__; if (configList->rootEntry == &rootmenu) return; diff --git a/scripts/kconfig/qconf.h b/scripts/kconfig/qconf.h index fb9e9729266fc5a14ee9b5f1becc1355f70a6eb4..5eeab4a8bb436f0f7c6392bd8b600e11e8145e46 100644 --- a/scripts/kconfig/qconf.h +++ b/scripts/kconfig/qconf.h @@ -92,10 +92,6 @@ public slots: { return this; } - ConfigItem* firstChild() const - { - return (ConfigItem *)children().first(); - } void addColumn(colIdx idx) { showColumn(idx); diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 6aea65c657454c67a937e1eccb16f0b204844486..69341b36f27119d262108d4e850f3d8e2b253d21 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -138,11 +138,20 @@ char *read_text_file(const char *filename) char *get_line(char **stringp) { + char *orig = *stringp, *next; + /* do not return the unwanted extra line at EOF */ - if (*stringp && **stringp == '\0') + if (!orig || *orig == '\0') return NULL; - return strsep(stringp, "\n"); + /* don't use strsep here, it is not available everywhere */ + next = strchr(orig, '\n'); + if (next) + *next++ = '\0'; + + *stringp = next; + + return orig; } /* A list of all modules we processed */ diff --git a/security/integrity/iint.c b/security/integrity/iint.c index e12c4900510f607bd7a09be7ce3ec7d150b5954b..1d20003243c3fb6f176dbefd76dea1e087c52458 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c @@ -188,19 +188,7 @@ DEFINE_LSM(integrity) = { int integrity_kernel_read(struct file *file, loff_t offset, void *addr, unsigned long count) { - mm_segment_t old_fs; - char __user *buf = (char __user *)addr; - ssize_t ret; - - if (!(file->f_mode & FMODE_READ)) - return -EBADF; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = __vfs_read(file, buf, count, &offset); - set_fs(old_fs); - - return ret; + return __kernel_read(file, addr, count, &offset); } /* diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index 509290f2efa8ecee7971c272deba82a4136ba50d..0e53f6f319167c9e4dd3b1e342bf6b8e2fe1f321 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c @@ -764,6 +764,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream) retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP); if (!retval) { + /* clear flags and stop any drain wait */ + stream->partial_drain = false; + stream->metadata_set = false; snd_compr_drain_notify(stream); stream->runtime->total_bytes_available = 0; stream->runtime->total_bytes_transferred = 0; @@ -921,6 +924,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream) if (stream->next_track == false) return -EPERM; + stream->partial_drain = true; retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN); if (retval) { pr_debug("Partial drain returned failure\n"); diff --git a/sound/core/info.c b/sound/core/info.c index 8c6bc5241df50c55a66c35a8b4e9d8863664d5af..9fec3070f8ba30734a21a7376c90d94ec6d2f1cd 100644 --- a/sound/core/info.c +++ 
b/sound/core/info.c @@ -606,7 +606,9 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len) { int c; - if (snd_BUG_ON(!buffer || !buffer->buffer)) + if (snd_BUG_ON(!buffer)) + return 1; + if (!buffer->buffer) return 1; if (len <= 0 || buffer->stop || buffer->error) return 1; diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c index e69a4ef0d6bdea61218aa9c9b95b06d0cfdcddce..08c10ac9d6c87a75374ac97e787ded3be15c4617 100644 --- a/sound/drivers/opl3/opl3_synth.c +++ b/sound/drivers/opl3/opl3_synth.c @@ -91,6 +91,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file, { struct snd_dm_fm_info info; + memset(&info, 0, sizeof(info)); + info.fm_mode = opl3->fm_mode; info.rhythm = opl3->rhythm; if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info))) diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 2c6d2becfe1a0bce6b1910e9c144d05bb6b1334e..824f4ac1a8ce787d142ad42e17aab63b07b04c1e 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -72,6 +72,12 @@ static int compare_input_type(const void *ap, const void *bp) if (a->type != b->type) return (int)(a->type - b->type); + /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */ + if (a->is_headset_mic && b->is_headphone_mic) + return -1; /* don't swap */ + else if (a->is_headphone_mic && b->is_headset_mic) + return 1; /* swap */ + /* In case one has boost and the other one has not, pick the one with boost first. */ return (int)(b->has_boost_on_pin - a->has_boost_on_pin); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7e3ae4534df9164cb57d69a19f29f006ad3a218c..803978d69e3c4d283bfbe1b63512296bfcf43402 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2935,6 +2935,10 @@ static int hda_codec_runtime_suspend(struct device *dev) struct hda_codec *codec = dev_to_hda_codec(dev); unsigned int state; + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + cancel_delayed_work_sync(&codec->jackpoll_work); state = hda_call_codec_suspend(codec); if (codec->link_down_at_suspend || @@ -2949,6 +2953,10 @@ static int hda_codec_runtime_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); + /* Nothing to do if card registration fails and the component driver never probes */ + if (!codec->card) + return 0; + codec_display_power(codec, true); snd_hdac_codec_link_up(&codec->core); hda_call_codec_resume(codec); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 82e26442724ba0cb7fd0ed6ebff6a75b12b165c3..a356fb0e57738c7558e72127c44cc5afafb2b8c0 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -41,7 +41,7 @@ /* 24 unused */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ -/* 27 unused */ +#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 3565e2ab0965a05b2a6a3a9989283304026b1b88..3fbba2e51e36daf8ff39012c8ed0af042f056509 100644 --- a/sound/pci/hda/hda_intel.c +++ 
b/sound/pci/hda/hda_intel.c @@ -298,7 +298,8 @@ enum { /* PCH for HSW/BDW; with runtime PM */ /* no i915 binding for this as HSW/BDW has another controller for HDMI */ #define AZX_DCAPS_INTEL_PCH \ - (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME) + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ + AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) /* HSW HDMI */ #define AZX_DCAPS_INTEL_HASWELL \ @@ -1028,7 +1029,14 @@ static int azx_suspend(struct device *dev) chip = card->private_data; bus = azx_bus(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); - pm_runtime_force_suspend(dev); + /* An ugly workaround: direct call of __azx_runtime_suspend() and + * __azx_runtime_resume() for old Intel platforms that suffer from + * spurious wakeups after S3 suspend + */ + if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) + __azx_runtime_suspend(chip); + else + pm_runtime_force_suspend(dev); if (bus->irq >= 0) { free_irq(bus->irq, chip); bus->irq = -1; @@ -1057,7 +1065,10 @@ static int azx_resume(struct device *dev) if (azx_acquire_irq(chip, 1) < 0) return -EIO; - pm_runtime_force_resume(dev); + if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) + __azx_runtime_resume(chip, false); + else + pm_runtime_force_resume(dev); snd_power_change_state(card, SNDRV_CTL_POWER_D0); trace_azx_resume(chip); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index e2b21ef5d7d12994be0ca64d332d0d11c24486a0..cd46247988e4d38b2ff68b232874d398efacd9db 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -259,7 +259,7 @@ static int hinfo_to_pcm_index(struct hda_codec *codec, if (get_pcm_rec(spec, pcm_idx)->stream == hinfo) return pcm_idx; - codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo); + codec_warn(codec, "HDMI: hinfo %p not tied to a PCM\n", hinfo); return -EINVAL; } @@ -277,7 +277,8 @@ static int hinfo_to_pin_index(struct hda_codec *codec, return pin_idx; } - codec_dbg(codec, "HDMI: hinfo %p not registered\n", hinfo); + codec_dbg(codec, "HDMI: hinfo %p (pcm %d) not registered\n", hinfo, + hinfo_to_pcm_index(codec, hinfo)); return -EINVAL; } @@ -1804,33 +1805,43 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) static int hdmi_parse_codec(struct hda_codec *codec) { - hda_nid_t nid; + hda_nid_t start_nid; + unsigned int caps; int i, nodes; - nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid); - if (!nid || nodes < 0) { + nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid); + if (!start_nid || nodes < 0) { codec_warn(codec, "HDMI: failed to get afg sub nodes\n"); return -EINVAL; } - for (i = 0; i < nodes; i++, nid++) { - unsigned int caps; - unsigned int type; + /* + * hdmi_add_pin() assumes total amount of converters to + * be known, so first discover all converters + */ + for (i = 0; i < nodes; i++) { + hda_nid_t nid = start_nid + i; caps = get_wcaps(codec, nid); - type = get_wcaps_type(caps); if (!(caps & AC_WCAP_DIGITAL)) continue; - switch (type) { - case AC_WID_AUD_OUT: + if (get_wcaps_type(caps) == AC_WID_AUD_OUT) hdmi_add_cvt(codec, nid); - break; - case AC_WID_PIN: + } + + /* discover audio pins */ + for (i = 0; i < nodes; i++) { + hda_nid_t nid = start_nid + i; + + caps = get_wcaps(codec, nid); + + if (!(caps & AC_WCAP_DIGITAL)) + continue; + + if (get_wcaps_type(caps) == AC_WID_PIN) hdmi_add_pin(codec, nid); - break; - } } return 0; @@ -2429,6 +2440,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp, mutex_lock(&spec->bind_lock); spec->use_acomp_notifier = use_acomp; 
spec->codec->relaxed_resume = use_acomp; + spec->codec->bus->keep_power = 0; /* reprogram each jack detection logic depending on the notifier */ for (i = 0; i < spec->num_pins; i++) reprogram_jack_detect(spec->codec, @@ -2523,7 +2535,6 @@ static void generic_acomp_init(struct hda_codec *codec, if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops, match_bound_vga, 0)) { spec->acomp_registered = true; - codec->bus->keep_power = 0; } } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 737ef82a75fda2a6a8aecf7238df648c3fc6c1e9..29f5878f0c501119712b70e5be0240c9e3890633 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5975,6 +5975,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec, snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); } +static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action != HDA_FIXUP_ACT_INIT) + return; + + msleep(100); + alc_write_coef_idx(codec, 0x65, 0x0); +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -6149,6 +6159,13 @@ enum { ALC236_FIXUP_HP_MUTE_LED, ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, + ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, + ALC269VC_FIXUP_ACER_HEADSET_MIC, + ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE, + ALC289_FIXUP_ASUS_GA401, + ALC289_FIXUP_ASUS_GA502, + ALC256_FIXUP_ACER_MIC_NO_PRESENCE, + ALC285_FIXUP_HP_GPIO_AMP_INIT, }; static const struct hda_fixup alc269_fixups[] = { @@ -7114,7 +7131,7 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_HEADSET_MIC] = { .type = HDA_FIXUP_PINS, @@ -7123,7 +7140,7 @@ static const struct hda_fixup alc269_fixups[] = { { } }, .chained = true, - .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC + .chain_id = ALC269_FIXUP_HEADSET_MIC }, [ALC294_FIXUP_ASUS_SPK] = { .type = HDA_FIXUP_VERBS, @@ -7131,6 +7148,8 @@ static const struct hda_fixup alc269_fixups[] = { /* Set EAPD high */ { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 }, { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 }, + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 }, { } }, .chained = true, @@ -7327,6 +7346,64 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC269_FIXUP_HEADSET_MODE }, + [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90100120 }, /* use as internal speaker */ + { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */ + { 0x1a, 0x01011020 }, /* use as line out */ + { }, + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC269VC_FIXUP_ACER_HEADSET_MIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x02a11030 }, /* use as headset mic */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC + }, + [ALC289_FIXUP_ASUS_GA401] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ + { } + }, + }, + [ALC289_FIXUP_ASUS_GA502] = { + .type = HDA_FIXUP_PINS, + .v.pins = 
(const struct hda_pintbl[]) { + { 0x19, 0x03a11020 }, /* headset mic with jack detect */ + { } + }, + }, + [ALC256_FIXUP_ACER_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x02a11120 }, /* use as headset mic, without its own jack detect */ + { } + }, + .chained = true, + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE + }, + [ALC285_FIXUP_HP_GPIO_AMP_INIT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_gpio_amp_init, + .chained = true, + .chain_id = ALC285_FIXUP_HP_GPIO_LED + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7342,16 +7419,20 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK), + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), @@ -7473,7 +7554,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), - SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -7495,6 +7576,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", 
ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), @@ -7504,6 +7586,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -7523,11 +7607,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), @@ -7571,8 +7657,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), - SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), - SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), + SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c index f25ce50f1a901609b51aeb2d2cc86bd6de51d689..ebf4388b626215ffb0bc76ea22fb63624dbcea98 100644 --- a/sound/soc/amd/raven/pci-acp3x.c +++ b/sound/soc/amd/raven/pci-acp3x.c @@ -232,9 +232,7 @@ static int snd_acp3x_probe(struct pci_dev *pci, } pm_runtime_set_autosuspend_delay(&pci->dev, 2000); pm_runtime_use_autosuspend(&pci->dev); - pm_runtime_set_active(&pci->dev); pm_runtime_put_noidle(&pci->dev); - pm_runtime_enable(&pci->dev); pm_runtime_allow(&pci->dev); return 0; @@ -303,7 +301,7 @@ static void snd_acp3x_remove(struct pci_dev *pci) ret = 
acp3x_deinit(adata->acp3x_base); if (ret) dev_err(&pci->dev, "ACP de-init failed\n"); - pm_runtime_disable(&pci->dev); + pm_runtime_forbid(&pci->dev); pm_runtime_get_noresume(&pci->dev); pci_disable_msi(pci); pci_release_regions(pci); diff --git a/sound/soc/amd/renoir/Makefile b/sound/soc/amd/renoir/Makefile index e4371932a55a66a6215b6478646570a8dcb1131e..4a82690aec16a91207038bd495d449ae2a37a254 100644 --- a/sound/soc/amd/renoir/Makefile +++ b/sound/soc/amd/renoir/Makefile @@ -2,6 +2,7 @@ # Renoir platform Support snd-rn-pci-acp3x-objs := rn-pci-acp3x.o snd-acp3x-pdm-dma-objs := acp3x-pdm-dma.o -obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-rn-pci-acp3x.o -obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-acp3x-pdm-dma.o -obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH) += acp3x-rn.o +snd-acp3x-rn-objs := acp3x-rn.o +obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-rn-pci-acp3x.o +obj-$(CONFIG_SND_SOC_AMD_RENOIR) += snd-acp3x-pdm-dma.o +obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH) += snd-acp3x-rn.o diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c index 96718e3a1ad0e8d2dbd34d981e311770d5b5ccd0..d87402a86c88006379927fa08c1dd994d0ed4703 100644 --- a/sound/soc/codecs/max98373.c +++ b/sound/soc/codecs/max98373.c @@ -779,13 +779,6 @@ static int max98373_probe(struct snd_soc_component *component) regmap_write(max98373->regmap, MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2, 0x1); - /* Set inital volume (0dB) */ - regmap_write(max98373->regmap, - MAX98373_R203D_AMP_DIG_VOL_CTRL, - 0x00); - regmap_write(max98373->regmap, - MAX98373_R203E_AMP_PATH_GAIN, - 0x00); /* Enable DC blocker */ regmap_write(max98373->regmap, MAX98373_R203F_AMP_DSP_CFG, @@ -869,7 +862,6 @@ static const struct snd_soc_component_driver soc_codec_dev_max98373 = { .num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets), .dapm_routes = max98373_audio_map, .num_dapm_routes = ARRAY_SIZE(max98373_audio_map), - .idle_bias_on = 1, .use_pmdown_time = 1, .endianness = 1, .non_legacy_dai_naming = 1, diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index 9593a9a27bf85869240127249043722f3552f75f..e8d14eefc41bb973d09a1b48a84418a49f47f77a 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -272,13 +272,13 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic) regmap_read(rt286->regmap, RT286_GET_MIC1_SENSE, &buf); *mic = buf & 0x80000000; } - if (!*mic) { + + if (!*hp) { snd_soc_dapm_disable_pin(dapm, "HV"); snd_soc_dapm_disable_pin(dapm, "VREF"); - } - if (!*hp) snd_soc_dapm_disable_pin(dapm, "LDO1"); - snd_soc_dapm_sync(dapm); + snd_soc_dapm_sync(dapm); + } return 0; } diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index 70fee6849ab00e260e765de8e27f8c0cb87d19b2..dfbc0ca38ff7fef36d7c6b4e06066157cfd5ea23 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -31,18 +31,19 @@ #include "rt5670.h" #include "rt5670-dsp.h" -#define RT5670_DEV_GPIO BIT(0) -#define RT5670_IN2_DIFF BIT(1) -#define RT5670_DMIC_EN BIT(2) -#define RT5670_DMIC1_IN2P BIT(3) -#define RT5670_DMIC1_GPIO6 BIT(4) -#define RT5670_DMIC1_GPIO7 BIT(5) -#define RT5670_DMIC2_INR BIT(6) -#define RT5670_DMIC2_GPIO8 BIT(7) -#define RT5670_DMIC3_GPIO5 BIT(8) -#define RT5670_JD_MODE1 BIT(9) -#define RT5670_JD_MODE2 BIT(10) -#define RT5670_JD_MODE3 BIT(11) +#define RT5670_DEV_GPIO BIT(0) +#define RT5670_IN2_DIFF BIT(1) +#define RT5670_DMIC_EN BIT(2) +#define RT5670_DMIC1_IN2P BIT(3) +#define RT5670_DMIC1_GPIO6 BIT(4) +#define RT5670_DMIC1_GPIO7 BIT(5) +#define RT5670_DMIC2_INR BIT(6) +#define RT5670_DMIC2_GPIO8 BIT(7) 
+#define RT5670_DMIC3_GPIO5 BIT(8) +#define RT5670_JD_MODE1 BIT(9) +#define RT5670_JD_MODE2 BIT(10) +#define RT5670_JD_MODE3 BIT(11) +#define RT5670_GPIO1_IS_EXT_SPK_EN BIT(12) static unsigned long rt5670_quirk; static unsigned int quirk_override; @@ -602,9 +603,9 @@ int rt5670_set_jack_detect(struct snd_soc_component *component, EXPORT_SYMBOL_GPL(rt5670_set_jack_detect); static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0); -static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); +static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0); static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0); -static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); +static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000); static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ @@ -1447,6 +1448,33 @@ static int rt5670_hp_event(struct snd_soc_dapm_widget *w, return 0; } +static int rt5670_spk_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component); + + if (!rt5670->pdata.gpio1_is_ext_spk_en) + return 0; + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_HI); + break; + + case SND_SOC_DAPM_PRE_PMD: + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_OUT_MASK, RT5670_GP1_OUT_LO); + break; + + default: + return 0; + } + + return 0; +} + static int rt5670_bst1_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -1860,7 +1888,9 @@ static const struct snd_soc_dapm_widget rt5670_specific_dapm_widgets[] = { }; static const struct snd_soc_dapm_widget rt5672_specific_dapm_widgets[] = { - SND_SOC_DAPM_PGA("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_PGA_E("SPO Amp", SND_SOC_NOPM, 0, 0, NULL, 0, + rt5670_spk_event, SND_SOC_DAPM_PRE_PMD | + SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_OUTPUT("SPOLP"), SND_SOC_DAPM_OUTPUT("SPOLN"), SND_SOC_DAPM_OUTPUT("SPORP"), @@ -2857,14 +2887,14 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = { }, { .callback = rt5670_quirk_cb, - .ident = "Lenovo Thinkpad Tablet 10", + .ident = "Lenovo Miix 2 10", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Miix 2 10"), }, .driver_data = (unsigned long *)(RT5670_DMIC_EN | RT5670_DMIC1_IN2P | - RT5670_DEV_GPIO | + RT5670_GPIO1_IS_EXT_SPK_EN | RT5670_JD_MODE2), }, { @@ -2924,6 +2954,10 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, rt5670->pdata.dev_gpio = true; dev_info(&i2c->dev, "quirk dev_gpio\n"); } + if (rt5670_quirk & RT5670_GPIO1_IS_EXT_SPK_EN) { + rt5670->pdata.gpio1_is_ext_spk_en = true; + dev_info(&i2c->dev, "quirk GPIO1 is external speaker enable\n"); + } if (rt5670_quirk & RT5670_IN2_DIFF) { rt5670->pdata.in2_diff = true; dev_info(&i2c->dev, "quirk IN2_DIFF\n"); @@ -3023,6 +3057,13 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); } + if (rt5670->pdata.gpio1_is_ext_spk_en) { + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL1, + RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_GPIO1); + regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2, + RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT); + } + if (rt5670->pdata.jd_mode) { regmap_update_bits(rt5670->regmap, RT5670_GLB_CLK, RT5670_SCLK_SRC_MASK, RT5670_SCLK_SRC_RCCLK); diff --git 
a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h index a8c3e44770b82b9c616b1fd5e7c998362c93aa00..de0203369b7cd92bb872cc82ab25c9c759e39916 100644 --- a/sound/soc/codecs/rt5670.h +++ b/sound/soc/codecs/rt5670.h @@ -757,7 +757,7 @@ #define RT5670_PWR_VREF2_BIT 4 #define RT5670_PWR_FV2 (0x1 << 3) #define RT5670_PWR_FV2_BIT 3 -#define RT5670_LDO_SEL_MASK (0x3) +#define RT5670_LDO_SEL_MASK (0x7) #define RT5670_LDO_SEL_SFT 0 /* Power Management for Analog 2 (0x64) */ diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 3e9d2c6c51f9a60cacab882ddcf91f0fa2601ddc..d503b5bef4ba9ba62dc33c48a2660c6d2d652593 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -932,7 +932,9 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2); snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3, RT5682_PWR_CBJ, RT5682_PWR_CBJ); - + snd_soc_component_update_bits(component, + RT5682_HP_CHARGE_PUMP_1, + RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0); snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH); @@ -956,17 +958,21 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) rt5682->jack_type = SND_JACK_HEADPHONE; break; } + + snd_soc_component_update_bits(component, + RT5682_HP_CHARGE_PUMP_1, + RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, + RT5682_OSW_L_EN | RT5682_OSW_R_EN); } else { rt5682_enable_push_button_irq(component, false); snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW); - if (snd_soc_dapm_get_pin_status(dapm, "MICBIAS")) + if (!snd_soc_dapm_get_pin_status(dapm, "MICBIAS")) snd_soc_component_update_bits(component, - RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0); - else + RT5682_PWR_ANLG_1, RT5682_PWR_MB, 0); + if (!snd_soc_dapm_get_pin_status(dapm, "Vref2")) snd_soc_component_update_bits(component, - RT5682_PWR_ANLG_1, - RT5682_PWR_VREF2 | RT5682_PWR_MB, 0); + RT5682_PWR_ANLG_1, RT5682_PWR_VREF2, 0); snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3, RT5682_PWR_CBJ, 0); @@ -985,16 +991,17 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component, rt5682->hs_jack = hs_jack; - if (!rt5682->is_sdw) { - if (!hs_jack) { - regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2, - RT5682_JD1_EN_MASK, RT5682_JD1_DIS); - regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL, - RT5682_POW_JDH | RT5682_POW_JDL, 0); - cancel_delayed_work_sync(&rt5682->jack_detect_work); - return 0; - } + if (!hs_jack) { + regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2, + RT5682_JD1_EN_MASK, RT5682_JD1_DIS); + regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL, + RT5682_POW_JDH | RT5682_POW_JDL, 0); + cancel_delayed_work_sync(&rt5682->jack_detect_work); + + return 0; + } + if (!rt5682->is_sdw) { switch (rt5682->pdata.jd_src) { case RT5682_JD1: snd_soc_component_update_bits(component, @@ -1075,7 +1082,8 @@ void rt5682_jack_detect_handler(struct work_struct *work) /* jack was out, report jack type */ rt5682->jack_type = rt5682_headset_detect(rt5682->component, 1); - } else { + } else if ((rt5682->jack_type & SND_JACK_HEADSET) == + SND_JACK_HEADSET) { /* jack is already in, report button event */ rt5682->jack_type = SND_JACK_HEADSET; btn_type = rt5682_button_detect(rt5682->component); @@ -1601,8 +1609,7 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = { 0, set_filter_clk, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_SUPPLY("Vref1", RT5682_PWR_ANLG_1, 
RT5682_PWR_VREF1_BIT, 0, rt5682_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_SUPPLY("Vref2", RT5682_PWR_ANLG_1, RT5682_PWR_VREF2_BIT, 0, - NULL, 0), + SND_SOC_DAPM_SUPPLY("Vref2", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("MICBIAS", SND_SOC_NOPM, 0, 0, NULL, 0), /* ASRC */ @@ -2485,6 +2492,15 @@ static int rt5682_wclk_prepare(struct clk_hw *hw) snd_soc_dapm_force_enable_pin_unlocked(dapm, "MICBIAS"); snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, RT5682_PWR_MB, RT5682_PWR_MB); + + snd_soc_dapm_force_enable_pin_unlocked(dapm, "Vref2"); + snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, + RT5682_PWR_VREF2 | RT5682_PWR_FV2, + RT5682_PWR_VREF2); + usleep_range(55000, 60000); + snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, + RT5682_PWR_FV2, RT5682_PWR_FV2); + snd_soc_dapm_force_enable_pin_unlocked(dapm, "I2S1"); snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2F"); snd_soc_dapm_force_enable_pin_unlocked(dapm, "PLL2B"); @@ -2510,9 +2526,12 @@ static void rt5682_wclk_unprepare(struct clk_hw *hw) snd_soc_dapm_mutex_lock(dapm); snd_soc_dapm_disable_pin_unlocked(dapm, "MICBIAS"); + snd_soc_dapm_disable_pin_unlocked(dapm, "Vref2"); if (!rt5682->jack_type) snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, + RT5682_PWR_VREF2 | RT5682_PWR_FV2 | RT5682_PWR_MB, 0); + snd_soc_dapm_disable_pin_unlocked(dapm, "I2S1"); snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2F"); snd_soc_dapm_disable_pin_unlocked(dapm, "PLL2B"); diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c index 06ba36595ddd61b1650a9b940329801ee2fc8afa..7cfc89602fc397bc941e3790917c9497a7efe4ce 100644 --- a/sound/soc/codecs/wm8974.c +++ b/sound/soc/codecs/wm8974.c @@ -186,7 +186,7 @@ SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0), /* Boost mixer */ static const struct snd_kcontrol_new wm8974_boost_mixer[] = { -SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0), +SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 1), }; /* Input PGA */ @@ -474,6 +474,10 @@ static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai, iface |= 0x0008; break; case SND_SOC_DAIFMT_DSP_A: + if ((fmt & SND_SOC_DAIFMT_INV_MASK) == SND_SOC_DAIFMT_IB_IF || + (fmt & SND_SOC_DAIFMT_INV_MASK) == SND_SOC_DAIFMT_NB_IF) { + return -EINVAL; + } iface |= 0x00018; break; default: diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c index 0c813a45bba7c1e17144e1a54fc9c731d07d2e5c..69aeb0e71844d9f614e156f4e8a0cbcdb41d728f 100644 --- a/sound/soc/fsl/fsl_mqs.c +++ b/sound/soc/fsl/fsl_mqs.c @@ -265,12 +265,20 @@ static int fsl_mqs_remove(struct platform_device *pdev) static int fsl_mqs_runtime_resume(struct device *dev) { struct fsl_mqs *mqs_priv = dev_get_drvdata(dev); + int ret; - if (mqs_priv->ipg) - clk_prepare_enable(mqs_priv->ipg); + ret = clk_prepare_enable(mqs_priv->ipg); + if (ret) { + dev_err(dev, "failed to enable ipg clock\n"); + return ret; + } - if (mqs_priv->mclk) - clk_prepare_enable(mqs_priv->mclk); + ret = clk_prepare_enable(mqs_priv->mclk); + if (ret) { + dev_err(dev, "failed to enable mclk clock\n"); + clk_disable_unprepare(mqs_priv->ipg); + return ret; + } if (mqs_priv->use_gpr) regmap_write(mqs_priv->regmap, IOMUXC_GPR2, @@ -292,11 +300,8 @@ static int fsl_mqs_runtime_suspend(struct device *dev) regmap_read(mqs_priv->regmap, REG_MQS_CTRL, &mqs_priv->reg_mqs_ctrl); - if (mqs_priv->mclk) - clk_disable_unprepare(mqs_priv->mclk); - - if (mqs_priv->ipg) - clk_disable_unprepare(mqs_priv->ipg); + clk_disable_unprepare(mqs_priv->mclk); + 
clk_disable_unprepare(mqs_priv->ipg); return 0; } diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c index 9ad35d9940fef6436761ed9d851e06610e7e138c..97b4f5480a31cb94c6c39a2a7d6466744e7d017d 100644 --- a/sound/soc/generic/audio-graph-card.c +++ b/sound/soc/generic/audio-graph-card.c @@ -317,8 +317,8 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, if (ret < 0) goto out_put_node; - dai_link->dpcm_playback = 1; - dai_link->dpcm_capture = 1; + snd_soc_dai_link_set_capabilities(dai_link); + dai_link->ops = &graph_ops; dai_link->init = asoc_simple_dai_init; diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 55e9f8800b3e103638e38b651aa9e37ac37fdcc7..04d4d28ed5112891cfce65920247c81a929e289e 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -231,8 +231,8 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, if (ret < 0) goto out_put_node; - dai_link->dpcm_playback = 1; - dai_link->dpcm_capture = 1; + snd_soc_dai_link_set_capabilities(dai_link); + dai_link->ops = &simple_ops; dai_link->init = asoc_simple_dai_init; diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c index 5f96d7ac0a226e2d98e6c6567692c378c33133b9..bed4d5f73d9cffe6887ae74aa718dd03e7d3d084 100644 --- a/sound/soc/intel/boards/bdw-rt5677.c +++ b/sound/soc/intel/boards/bdw-rt5677.c @@ -354,6 +354,7 @@ static struct snd_soc_dai_link bdw_rt5677_dais[] = { { .name = "Codec DSP", .stream_name = "Wake on Voice", + .capture_only = 1, .ops = &bdw_rt5677_dsp_ops, SND_SOC_DAILINK_REG(dsp), }, diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c index 9e5fc9430628ccefc53ea6bb6f1482b39c32c043..ecbc58e8a37f5cb292f951ba62f725c2c41d6800 100644 --- a/sound/soc/intel/boards/bytcht_es8316.c +++ b/sound/soc/intel/boards/bytcht_es8316.c @@ -543,8 +543,10 @@ static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev) if (cnt) { ret = device_add_properties(codec_dev, props); - if (ret) + if (ret) { + put_device(codec_dev); return ret; + } } devm_acpi_dev_add_driver_gpios(codec_dev, byt_cht_es8316_gpios); diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c index 7a43c70a1378a2e62a22102377b1dc1d4f19387f..22e432768edb3e9702aba9e6ce6d45b65438d898 100644 --- a/sound/soc/intel/boards/cht_bsw_rt5672.c +++ b/sound/soc/intel/boards/cht_bsw_rt5672.c @@ -253,21 +253,20 @@ static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd, params_set_format(params, SNDRV_PCM_FORMAT_S24_LE); /* - * Default mode for SSP configuration is TDM 4 slot + * Default mode for SSP configuration is TDM 4 slot. One board/design, + * the Lenovo Miix 2 10 uses not 1 but 2 codecs connected to SSP2. The + * second piggy-backed, output-only codec is inside the keyboard-dock + * (which has extra speakers). Unlike the main rt5672 codec, we cannot + * configure this codec, it is hard coded to use 2 channel 24 bit I2S. + * Since we only support 2 channels anyways, there is no need for TDM + * on any cht-bsw-rt5672 designs. So we simply use I2S 2ch everywhere. 
*/ - ret = snd_soc_dai_set_fmt(asoc_rtd_to_codec(rtd, 0), - SND_SOC_DAIFMT_DSP_B | - SND_SOC_DAIFMT_IB_NF | + ret = snd_soc_dai_set_fmt(asoc_rtd_to_cpu(rtd, 0), + SND_SOC_DAIFMT_I2S | + SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) { - dev_err(rtd->dev, "can't set format to TDM %d\n", ret); - return ret; - } - - /* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */ - ret = snd_soc_dai_set_tdm_slot(asoc_rtd_to_codec(rtd, 0), 0xF, 0xF, 4, 24); - if (ret < 0) { - dev_err(rtd->dev, "can't set codec TDM slot %d\n", ret); + dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret); return ret; } diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig index f51b28d1b94d87b5fe31c4b70f4ce532ed884850..92f51d0e9fe2bf020acb9aa0a555356fa8628ce3 100644 --- a/sound/soc/qcom/Kconfig +++ b/sound/soc/qcom/Kconfig @@ -72,7 +72,7 @@ config SND_SOC_QDSP6_ASM_DAI config SND_SOC_QDSP6 tristate "SoC ALSA audio driver for QDSP6" - depends on QCOM_APR && HAS_DMA + depends on QCOM_APR select SND_SOC_QDSP6_COMMON select SND_SOC_QDSP6_CORE select SND_SOC_QDSP6_AFE diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c index f45e5aaa4b3028f21e53e5514bea4be856459927..9539b0d024fed224577fb000a8077fda874a74f3 100644 --- a/sound/soc/rockchip/rk3399_gru_sound.c +++ b/sound/soc/rockchip/rk3399_gru_sound.c @@ -219,19 +219,32 @@ static int rockchip_sound_dmic_hw_params(struct snd_pcm_substream *substream, return 0; } +static int rockchip_sound_startup(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + + runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; + return snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE, + 8000, 96000); +} + static const struct snd_soc_ops rockchip_sound_max98357a_ops = { + .startup = rockchip_sound_startup, .hw_params = rockchip_sound_max98357a_hw_params, }; static const struct snd_soc_ops rockchip_sound_rt5514_ops = { + .startup = rockchip_sound_startup, .hw_params = rockchip_sound_rt5514_hw_params, }; static const struct snd_soc_ops rockchip_sound_da7219_ops = { + .startup = rockchip_sound_startup, .hw_params = rockchip_sound_da7219_hw_params, }; static const struct snd_soc_ops rockchip_sound_dmic_ops = { + .startup = rockchip_sound_startup, .hw_params = rockchip_sound_dmic_hw_params, }; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 0f30f5aabaa87c54478766fd86ba7b88c84e1ec7..2b8abf88ec6032f3a95dad93072ce67e33f2aac9 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2572,6 +2572,33 @@ int snd_soc_register_component(struct device *dev, } EXPORT_SYMBOL_GPL(snd_soc_register_component); +/** + * snd_soc_unregister_component_by_driver - Unregister component using a given driver + * from the ASoC core + * + * @dev: The device to unregister + * @component_driver: The component driver to unregister + */ +void snd_soc_unregister_component_by_driver(struct device *dev, + const struct snd_soc_component_driver *component_driver) +{ + struct snd_soc_component *component; + + if (!component_driver) + return; + + mutex_lock(&client_mutex); + component = snd_soc_lookup_component_nolocked(dev, component_driver->name); + if (!component) + goto out; + + snd_soc_del_component_unlocked(component); + +out: + mutex_unlock(&client_mutex); +} +EXPORT_SYMBOL_GPL(snd_soc_unregister_component_by_driver); + /** * snd_soc_unregister_component - Unregister all related component * from the ASoC core diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c index 
b05e18b63a1c3c70e16a5408240995b05f4a9145..457159975b01a22237e81810855940e4b955618c 100644 --- a/sound/soc/soc-dai.c +++ b/sound/soc/soc-dai.c @@ -391,6 +391,44 @@ bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int dir) return stream->channels_min; } +/* + * snd_soc_dai_link_set_capabilities() - set dai_link properties based on its DAIs + */ +void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link) +{ + struct snd_soc_dai_link_component *cpu; + struct snd_soc_dai_link_component *codec; + struct snd_soc_dai *dai; + bool supported[SNDRV_PCM_STREAM_LAST + 1]; + int direction; + int i; + + for_each_pcm_streams(direction) { + supported[direction] = true; + + for_each_link_cpus(dai_link, i, cpu) { + dai = snd_soc_find_dai(cpu); + if (!dai || !snd_soc_dai_stream_valid(dai, direction)) { + supported[direction] = false; + break; + } + } + if (!supported[direction]) + continue; + for_each_link_codecs(dai_link, i, codec) { + dai = snd_soc_find_dai(codec); + if (!dai || !snd_soc_dai_stream_valid(dai, direction)) { + supported[direction] = false; + break; + } + } + } + + dai_link->dpcm_playback = supported[SNDRV_PCM_STREAM_PLAYBACK]; + dai_link->dpcm_capture = supported[SNDRV_PCM_STREAM_CAPTURE]; +} +EXPORT_SYMBOL_GPL(snd_soc_dai_link_set_capabilities); + void snd_soc_dai_action(struct snd_soc_dai *dai, int stream, int action) { diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c index 11e5d79623707a7b36803323056bc179a09dab5d..4534a1c03e8e5e85f9f5e4e889197edd88652649 100644 --- a/sound/soc/soc-devres.c +++ b/sound/soc/soc-devres.c @@ -48,7 +48,9 @@ EXPORT_SYMBOL_GPL(devm_snd_soc_register_dai); static void devm_component_release(struct device *dev, void *res) { - snd_soc_unregister_component(*(struct device **)res); + const struct snd_soc_component_driver **cmpnt_drv = res; + + snd_soc_unregister_component_by_driver(dev, *cmpnt_drv); } /** @@ -65,7 +67,7 @@ int devm_snd_soc_register_component(struct device *dev, const struct snd_soc_component_driver *cmpnt_drv, struct snd_soc_dai_driver *dai_drv, int num_dai) { - struct device **ptr; + const struct snd_soc_component_driver **ptr; int ret; ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL); @@ -74,7 +76,7 @@ int devm_snd_soc_register_component(struct device *dev, ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai); if (ret == 0) { - *ptr = dev; + *ptr = cmpnt_drv; devres_add(dev, ptr); } else { devres_free(ptr); diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index 80a4e71f2d95d6254a6e45b8ffe4600c3a7151bb..61844403f1817fd2bdc7c781f0d1c545e5f16f97 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -478,7 +478,7 @@ void snd_dmaengine_pcm_unregister(struct device *dev) pcm = soc_component_to_pcm(component); - snd_soc_unregister_component(dev); + snd_soc_unregister_component_by_driver(dev, component->driver); dmaengine_pcm_release_chan(pcm); kfree(pcm); } diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 43e5745b06aa7fe91e96472f5289cf547bcf799a..6eaa00c210117265e778fa6ccb528d530e7f556f 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -1261,17 +1261,29 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg, list_add(&routes[i]->dobj.list, &tplg->comp->dobj_list); ret = soc_tplg_add_route(tplg, routes[i]); - if (ret < 0) + if (ret < 0) { + /* + * this route was added to the list, it will + * be freed in remove_route() so increment the + * counter to 
skip it in the error handling + * below. + */ + i++; break; + } /* add route, but keep going if some fail */ snd_soc_dapm_add_routes(dapm, routes[i], 1); } - /* free memory allocated for all dapm routes in case of error */ - if (ret < 0) - for (i = 0; i < count ; i++) - kfree(routes[i]); + /* + * free memory allocated for all dapm routes not added to the + * list in case of error + */ + if (ret < 0) { + while (i < count) + kfree(routes[i++]); + } /* * free pointer to array of dapm routes as this is no longer needed. @@ -1359,7 +1371,6 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( if (err < 0) { dev_err(tplg->dev, "ASoC: failed to init %s\n", mc->hdr.name); - soc_tplg_free_tlv(tplg, &kc[i]); goto err_sm; } } @@ -1367,6 +1378,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create( err_sm: for (; i >= 0; i--) { + soc_tplg_free_tlv(tplg, &kc[i]); sm = (struct soc_mixer_control *)kc[i].private_value; kfree(sm); kfree(kc[i].name); diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c index 339c4930b0c0a77d0c613a4d3b41ee982bd2a66b..adc7c37145d6407c173c942cd58f61103f0c3bbb 100644 --- a/sound/soc/sof/core.c +++ b/sound/soc/sof/core.c @@ -345,15 +345,15 @@ int snd_sof_device_remove(struct device *dev) struct snd_sof_pdata *pdata = sdev->pdata; int ret; - ret = snd_sof_dsp_power_down_notify(sdev); - if (ret < 0) - dev_warn(dev, "error: %d failed to prepare DSP for device removal", - ret); - if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE)) cancel_work_sync(&sdev->probe_work); if (sdev->fw_state > SOF_FW_BOOT_NOT_STARTED) { + ret = snd_sof_dsp_power_down_notify(sdev); + if (ret < 0) + dev_warn(dev, "error: %d failed to prepare DSP for device removal", + ret); + snd_sof_fw_unload(sdev); snd_sof_ipc_free(sdev); snd_sof_free_debug(sdev); diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c index 63f9c20a1bacf57b2e0cf3cadd2e0f636d486215..a4fa8451d8cb39feaedf7e0d26b5a7f1d57e955c 100644 --- a/sound/soc/sof/imx/imx8.c +++ b/sound/soc/sof/imx/imx8.c @@ -375,6 +375,14 @@ static int imx8_ipc_pcm_params(struct snd_sof_dev *sdev, static struct snd_soc_dai_driver imx8_dai[] = { { .name = "esai-port", + .playback = { + .channels_min = 1, + .channels_max = 8, + }, + .capture = { + .channels_min = 1, + .channels_max = 8, + }, }, }; diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c index fa86a9e2990f8722aebebf8f591333d948c53a02..287114a37688c676f503495a2c95c8032683ce92 100644 --- a/sound/soc/sof/imx/imx8m.c +++ b/sound/soc/sof/imx/imx8m.c @@ -240,6 +240,14 @@ static int imx8m_ipc_pcm_params(struct snd_sof_dev *sdev, static struct snd_soc_dai_driver imx8m_dai[] = { { .name = "sai-port", + .playback = { + .channels_min = 1, + .channels_max = 32, + }, + .capture = { + .channels_min = 1, + .channels_max = 32, + }, }, }; diff --git a/sound/usb/card.h b/sound/usb/card.h index d6219fba96995bf187284a813546c005340b4a70..de43267b9c8af77dca423d8a1bc6079323806ac9 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -84,10 +84,10 @@ struct snd_usb_endpoint { dma_addr_t sync_dma; /* DMA address of syncbuf */ unsigned int pipe; /* the data i/o pipe */ - unsigned int framesize[2]; /* small/large frame sizes in samples */ - unsigned int sample_rem; /* remainder from division fs/fps */ + unsigned int packsize[2]; /* small/large packet sizes in samples */ + unsigned int sample_rem; /* remainder from division fs/pps */ unsigned int sample_accum; /* sample accumulator */ - unsigned int fps; /* frames per second */ + unsigned int pps; /* packets per second */ 
unsigned int freqn; /* nominal sampling rate in fs/fps in Q16.16 format */ unsigned int freqm; /* momentary sampling rate in fs/fps in Q16.16 format */ int freqshift; /* how much to shift the feedback value to get Q16.16 */ diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 9bea7d3f99f88f169806aa9f11bdffe9daaa1a78..88760268fb5568eddbb7cdf90e7961664f31b717 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -159,11 +159,11 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) return ep->maxframesize; ep->sample_accum += ep->sample_rem; - if (ep->sample_accum >= ep->fps) { - ep->sample_accum -= ep->fps; - ret = ep->framesize[1]; + if (ep->sample_accum >= ep->pps) { + ep->sample_accum -= ep->pps; + ret = ep->packsize[1]; } else { - ret = ep->framesize[0]; + ret = ep->packsize[0]; } return ret; @@ -1088,15 +1088,15 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) { ep->freqn = get_usb_full_speed_rate(rate); - ep->fps = 1000; + ep->pps = 1000 >> ep->datainterval; } else { ep->freqn = get_usb_high_speed_rate(rate); - ep->fps = 8000; + ep->pps = 8000 >> ep->datainterval; } - ep->sample_rem = rate % ep->fps; - ep->framesize[0] = rate / ep->fps; - ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps; + ep->sample_rem = rate % ep->pps; + ep->packsize[0] = rate / ep->pps; + ep->packsize[1] = (rate + (ep->pps - 1)) / ep->pps; /* calculate the frequency in 16.16 format */ ep->freqm = ep->freqn; diff --git a/sound/usb/line6/capture.c b/sound/usb/line6/capture.c index 663d608c4287cd22d66f3324605c07d3cf459a67..970c9bdce0b21648a3ba57fe1444748d9bfcafa4 100644 --- a/sound/usb/line6/capture.c +++ b/sound/usb/line6/capture.c @@ -286,6 +286,8 @@ int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_in_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 7629116f570ef222e47533a31e17f0c8204d322b..2746d969818070b999dc75e97b8fe6e12f9f4948 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -840,7 +840,7 @@ void line6_disconnect(struct usb_interface *interface) if (WARN_ON(usbdev != line6->usbdev)) return; - cancel_delayed_work(&line6->startup_work); + cancel_delayed_work_sync(&line6->startup_work); if (line6->urb_listen != NULL) line6_stop_listen(line6); diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c index 01930ce7bd75be9093204595f773607a3d3ecd40..8233c61e23f168e3d742700351ffa1475293b6f7 100644 --- a/sound/usb/line6/playback.c +++ b/sound/usb/line6/playback.c @@ -431,6 +431,8 @@ int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm) urb->interval = LINE6_ISO_INTERVAL; urb->error_count = 0; urb->complete = audio_out_callback; + if (usb_urb_ep_type_check(urb)) + return -EINVAL; } return 0; diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 047b90595d6581449c2e291e895e19851a6f84a0..354f576929384ceaca61b369d0f9e618f04f6d20 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1499,6 +1499,8 @@ void snd_usbmidi_disconnect(struct list_head *p) spin_unlock_irq(&umidi->disc_lock); up_write(&umidi->disc_rwsem); + del_timer_sync(&umidi->error_timer); + for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { struct snd_usb_midi_endpoint *ep = &umidi->endpoints[i]; if (ep->out) @@ -1525,7 +1527,6 @@ void snd_usbmidi_disconnect(struct list_head *p) ep->in = NULL; } } - 
del_timer_sync(&umidi->error_timer); } EXPORT_SYMBOL(snd_usbmidi_disconnect); @@ -2301,16 +2302,22 @@ void snd_usbmidi_input_stop(struct list_head *p) } EXPORT_SYMBOL(snd_usbmidi_input_stop); -static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint *ep) +static void snd_usbmidi_input_start_ep(struct snd_usb_midi *umidi, + struct snd_usb_midi_in_endpoint *ep) { unsigned int i; + unsigned long flags; if (!ep) return; for (i = 0; i < INPUT_URBS; ++i) { struct urb *urb = ep->urbs[i]; - urb->dev = ep->umidi->dev; - snd_usbmidi_submit_urb(urb, GFP_KERNEL); + spin_lock_irqsave(&umidi->disc_lock, flags); + if (!atomic_read(&urb->use_count)) { + urb->dev = ep->umidi->dev; + snd_usbmidi_submit_urb(urb, GFP_ATOMIC); + } + spin_unlock_irqrestore(&umidi->disc_lock, flags); } } @@ -2326,7 +2333,7 @@ void snd_usbmidi_input_start(struct list_head *p) if (umidi->input_running || !umidi->opened[1]) return; for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) - snd_usbmidi_input_start_ep(umidi->endpoints[i].in); + snd_usbmidi_input_start_ep(umidi, umidi->endpoints[i].in); umidi->input_running = 1; } EXPORT_SYMBOL(snd_usbmidi_input_start); diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index a777d36c4f5a87d88e07cf0f151784d472c875b0..a69d9e75f66fe4c8a75e85ca1998950dbb3f7fae 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -367,7 +367,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ifnum = 0; goto add_sync_ep_from_ifnum; case USB_ID(0x07fd, 0x0008): /* MOTU M Series */ + case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */ case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */ + case USB_ID(0x0d9a, 0x00df): /* RTX6001 */ ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 4ec491011b19c2ba7196226dd2240a25eac065ce..9092cc0aa8072b4fc7a9a7ddf24a2917a022def2 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3633,4 +3633,56 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ } }, +/* + * MacroSilicon MS2109 based HDMI capture cards + * + * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch. + * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if + * they pretend to be 96kHz mono as a workaround for stereo being broken + * by that... + * + * They also have swapped L-R channels, but that's for userspace to deal + * with. 
+ */ +{ + USB_DEVICE(0x534d, 0x2109), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "MacroSilicon", + .product_name = "MS2109", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = &(const struct snd_usb_audio_quirk[]) { + { + .ifnum = 2, + .type = QUIRK_AUDIO_ALIGN_TRANSFER, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, + { + .ifnum = 3, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 2, + .iface = 3, + .altsetting = 1, + .altset_idx = 1, + .attributes = 0, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC | + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .rate_min = 48000, + .rate_max = 48000, + } + }, + { + .ifnum = -1 + } + } + } +}, + #undef USB_DEVICE_VENDOR_SPEC diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 17c5a038f42d3978d1b06d7cec5f8c8afd92eaaf..0780f97c185088ce214bf7f57d917fda7aff730b 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data { }; struct kvm_vmx_nested_state_hdr { - __u32 flags; __u64 vmxon_pa; __u64 vmcs12_pa; - __u64 preemption_timer_deadline; struct { __u16 flags; } smm; + + __u32 flags; + __u64 preemption_timer_deadline; }; struct kvm_svm_nested_state_data { diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h index 4671fbf28842718fa74c9664dee74f8e129dbb7e..7f475d59a0974f17d7a9765c643b798a1dc6ee83 100644 --- a/tools/include/linux/bits.h +++ b/tools/include/linux/bits.h @@ -18,8 +18,7 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ -#if !defined(__ASSEMBLY__) && \ - (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) +#if !defined(__ASSEMBLY__) #include #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 974a71342aea681d330bff9074810d3c73f125e5..8bd33050b7bbb6e0c5e1d0bf2e1e7ffc11b2caa4 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3171,13 +3171,12 @@ union bpf_attr { * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of - * new data availability is sent. - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of - * new data availability is sent unconditionally. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. * Return - * 0, on success; - * < 0, on error. + * 0 on success, or a negative error in case of failure. * * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) * Description @@ -3189,20 +3188,20 @@ union bpf_attr { * void bpf_ringbuf_submit(void *data, u64 flags) * Description * Submit reserved ring buffer sample, pointed to by *data*. - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of - * new data availability is sent. - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of - * new data availability is sent unconditionally. 
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. * Return * Nothing. Always succeeds. * * void bpf_ringbuf_discard(void *data, u64 flags) * Description * Discard reserved ring buffer sample, pointed to by *data*. - * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of - * new data availability is sent. - * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of - * new data availability is sent unconditionally. + * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification + * of new data availability is sent. + * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification + * of new data availability is sent unconditionally. * Return * Nothing. Always succeeds. * @@ -3210,16 +3209,18 @@ union bpf_attr { * Description * Query various characteristics of provided ring buffer. What * exactly is queries is determined by *flags*: - * - BPF_RB_AVAIL_DATA - amount of data not yet consumed; - * - BPF_RB_RING_SIZE - the size of ring buffer; - * - BPF_RB_CONS_POS - consumer position (can wrap around); - * - BPF_RB_PROD_POS - producer(s) position (can wrap around); - * Data returned is just a momentary snapshots of actual values + * + * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed. + * * **BPF_RB_RING_SIZE**: The size of ring buffer. + * * **BPF_RB_CONS_POS**: Consumer position (can wrap around). + * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around). + * + * Data returned is just a momentary snapshot of actual values * and could be inaccurate, so this facility should be used to * power heuristics and for reporting, not to make 100% correct * calculation. * Return - * Requested value, or 0, if flags are not recognized. + * Requested value, or 0, if *flags* are not recognized. 
* * int bpf_csum_level(struct sk_buff *skb, u64 level) * Description diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 1b6015b21ba85979bdbf792cca6ef43319ae9b3f..dbef24ebcfcb9d251d5d6fa07e9f1d55d4bcbc82 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -233,6 +233,8 @@ LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len, __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset, __u64 *probe_addr); + +enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */ LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type); #ifdef __cplusplus diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h index df59fd4fc95ba82c2f0ceb3caa7ef6a8ce75082a..e0af36b0e5d839e2fdd57f0f921c8ba17e4a3629 100644 --- a/tools/lib/bpf/hashmap.h +++ b/tools/lib/bpf/hashmap.h @@ -11,14 +11,18 @@ #include #include #include -#ifndef __WORDSIZE -#define __WORDSIZE (__SIZEOF_LONG__ * 8) -#endif static inline size_t hash_bits(size_t h, int bits) { /* shuffle bits and return requested number of upper bits */ - return (h * 11400714819323198485llu) >> (__WORDSIZE - bits); +#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) + /* LP64 case */ + return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); +#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__) + return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits); +#else +# error "Unsupported size_t size" +#endif } typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 477c679ed94566289095b460bca29288806ea1b0..11e4725b8b1c010cf9d198f7457a520556267c59 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4818,7 +4818,13 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path) err = -EINVAL; goto out; } - prog = bpf_object__find_program_by_title(obj, sec_name); + prog = NULL; + for (i = 0; i < obj->nr_programs; i++) { + if (!strcmp(obj->programs[i].section_name, sec_name)) { + prog = &obj->programs[i]; + break; + } + } if (!prog) { pr_warn("failed to find program '%s' for CO-RE offset relocation\n", sec_name); @@ -6653,7 +6659,7 @@ static const struct bpf_sec_def section_defs[] = { .expected_attach_type = BPF_TRACE_ITER, .is_attach_btf = true, .attach_fn = attach_iter), - BPF_EAPROG_SEC("xdp_devmap", BPF_PROG_TYPE_XDP, + BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, BPF_XDP_DEVMAP), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c index dbb9efbf718a065d4d6e9998fa4d147b1cc691fc..39ebf6192016d2671c79c890f0807f2b5aec6d60 100644 --- a/tools/lib/subcmd/parse-options.c +++ b/tools/lib/subcmd/parse-options.c @@ -237,6 +237,9 @@ static int get_value(struct parse_opt_ctx_t *p, return err; case OPTION_CALLBACK: + if (opt->set) + *(bool *)opt->set = true; + if (unset) return (*opt->callback)(opt, NULL, 1) ? 
(-1) : 0; if (opt->flags & PARSE_OPT_NOARG) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index ee16be96697b5b7cc772dd2e98c51a57b9fe6e47..2dcffcf70defd402c6b654a9f1a71bda6dc3dc06 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -2861,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg, if (read_expected(TEP_EVENT_DELIM, ")") < 0) goto out_err; + free_token(token); type = read_token(&token); *tok = token; diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile index 946a4d31fcafbc3164d72f2f12aae00d7a07136c..47e8025532502fb65089e9d28b9da6545c6b13b6 100644 --- a/tools/lib/traceevent/plugins/Makefile +++ b/tools/lib/traceevent/plugins/Makefile @@ -199,7 +199,7 @@ define do_generate_dynamic_list_file xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\ if [ "$$symbol_type" = "U W" ];then \ (echo '{'; \ - $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\ + $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\ echo '};'; \ ) > $2; \ else \ diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json index 2df2e231e9eed6d2e4b8f708aa556fc18c1ebecd..24c4ba2a9ae54dc38f4b9df12ad38183c16cd9ba 100644 --- a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json +++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json @@ -380,7 +380,7 @@ { "Unit": "CPU-M-CF", "EventCode": "265", - "EventName": "DFLT_CCERROR", + "EventName": "DFLT_CCFINISH", "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2", "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2" }, diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh index 63a91ec473bb50d0cac40bfb6f2701c06852a8e5..045723b3d9928f7e25acfee6ed63e9d6b8588931 100755 --- a/tools/perf/tests/shell/record+zstd_comp_decomp.sh +++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh @@ -12,7 +12,8 @@ skip_if_no_z_record() { collect_z_record() { echo "Collecting compressed record file:" - $perf_tool record -o $trace_file -g -z -F 5000 -- \ + [[ "$(uname -m)" != s390x ]] && gflag='-g' + $perf_tool record -o $trace_file $gflag -z -F 5000 -- \ dd count=500 if=/dev/urandom of=/dev/null } diff --git a/tools/perf/util/hashmap.h b/tools/perf/util/hashmap.h index df59fd4fc95ba82c2f0ceb3caa7ef6a8ce75082a..e0af36b0e5d839e2fdd57f0f921c8ba17e4a3629 100644 --- a/tools/perf/util/hashmap.h +++ b/tools/perf/util/hashmap.h @@ -11,14 +11,18 @@ #include #include #include -#ifndef __WORDSIZE -#define __WORDSIZE (__SIZEOF_LONG__ * 8) -#endif static inline size_t hash_bits(size_t h, int bits) { /* shuffle bits and return requested number of upper bits */ - return (h * 11400714819323198485llu) >> (__WORDSIZE - bits); +#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__) + /* LP64 case */ + return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits); +#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__) + return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits); +#else +# error "Unsupported size_t size" +#endif } typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c index 
f7ee8fa377ad8b68f755fa97ba4e9f1c9c511a0a..6ccecbd394760caf1cd0e125a72f33005df3a7f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c @@ -5,10 +5,60 @@ #include "test_btf_map_in_map.skel.h" +static int duration; + +static __u32 bpf_map_id(struct bpf_map *map) +{ + struct bpf_map_info info; + __u32 info_len = sizeof(info); + int err; + + memset(&info, 0, info_len); + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len); + if (err) + return 0; + return info.id; +} + +/* + * Trigger synchronize_rcu() in kernel. + * + * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu() + * if looking up an existing non-NULL element or updating the map with a valid + * inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map, + * create a trivial ARRAY map, update map-in-map with ARRAY inner map. Then + * cleanup. At the end, at least one synchronize_rcu() would be called. + */ +static int kern_sync_rcu(void) +{ + int inner_map_fd, outer_map_fd, err, zero = 0; + + inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0); + if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno)) + return -1; + + outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL, + sizeof(int), inner_map_fd, 1, 0); + if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) { + close(inner_map_fd); + return -1; + } + + err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0); + if (err) + err = -errno; + CHECK(err, "outer_map_update", "failed %d\n", err); + close(inner_map_fd); + close(outer_map_fd); + return err; +} + void test_btf_map_in_map(void) { - int duration = 0, err, key = 0, val; - struct test_btf_map_in_map* skel; + int err, key = 0, val, i; + struct test_btf_map_in_map *skel; + int outer_arr_fd, outer_hash_fd; + int fd, map1_fd, map2_fd, map1_id, map2_id; skel = test_btf_map_in_map__open_and_load(); if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n")) @@ -18,32 +68,78 @@ void test_btf_map_in_map(void) if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) goto cleanup; + map1_fd = bpf_map__fd(skel->maps.inner_map1); + map2_fd = bpf_map__fd(skel->maps.inner_map2); + outer_arr_fd = bpf_map__fd(skel->maps.outer_arr); + outer_hash_fd = bpf_map__fd(skel->maps.outer_hash); + /* inner1 = input, inner2 = input + 1 */ - val = bpf_map__fd(skel->maps.inner_map1); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0); - val = bpf_map__fd(skel->maps.inner_map2); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0); + map1_fd = bpf_map__fd(skel->maps.inner_map1); + bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0); + map2_fd = bpf_map__fd(skel->maps.inner_map2); + bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0); skel->bss->input = 1; usleep(1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val); + bpf_map_lookup_elem(map1_fd, &key, &val); CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1); - bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val); + bpf_map_lookup_elem(map2_fd, &key, &val); CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2); /* inner1 = input + 1, inner2 = input */ - val = bpf_map__fd(skel->maps.inner_map2); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0); - val = bpf_map__fd(skel->maps.inner_map1); - bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0); + 
bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
+	bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
 	skel->bss->input = 3;
 	usleep(1);
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
+	bpf_map_lookup_elem(map1_fd, &key, &val);
 	CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
-	bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
+	bpf_map_lookup_elem(map2_fd, &key, &val);
 	CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
+	for (i = 0; i < 5; i++) {
+		val = i % 2 ? map1_fd : map2_fd;
+		err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
+		if (CHECK_FAIL(err)) {
+			printf("failed to update hash_of_maps on iter #%d\n", i);
+			goto cleanup;
+		}
+		err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
+		if (CHECK_FAIL(err)) {
+			printf("failed to update array_of_maps on iter #%d\n", i);
+			goto cleanup;
+		}
+	}
+
+	map1_id = bpf_map_id(skel->maps.inner_map1);
+	map2_id = bpf_map_id(skel->maps.inner_map2);
+	CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
+	CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
+
+	test_btf_map_in_map__destroy(skel);
+	skel = NULL;
+
+	/* we need to either wait for or force synchronize_rcu(), before
+	 * checking for "still exists" condition, otherwise map could still be
+	 * resolvable by ID, causing false positives.
+	 *
+	 * Older kernels (5.8 and earlier) freed map only after two
+	 * synchronize_rcu()s, so trigger two, to be entirely sure.
+	 */
+	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+	CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+
+	fd = bpf_map_get_fd_by_id(map1_id);
+	if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
+		close(fd);
+		goto cleanup;
+	}
+	fd = bpf_map_get_fd_by_id(map2_id);
+	if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
+		close(fd);
+		goto cleanup;
+	}
+
 cleanup:
 	test_btf_map_in_map__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
index 83493bd5745c875d60cb71bfe929adc9bf5e0808..109d0345a2be5ae30de270311fbba2c1ade45628 100644
--- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@ -36,7 +36,7 @@ void test_fentry_fexit(void)
 	fentry_res = (__u64 *)fentry_skel->bss;
 	fexit_res = (__u64 *)fexit_skel->bss;
 	printf("%lld\n", fentry_skel->bss->test1_result);
-	for (i = 0; i < 6; i++) {
+	for (i = 0; i < 8; i++) {
 		CHECK(fentry_res[i] != 1, "result",
 		      "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
 		CHECK(fexit_res[i] != 1, "result",
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index ea14e3ece81202ea8e4d922a1e0b041b2f2e2c99..f11f187990e95b87cb81d8457a976d8bb86f9fdd 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -527,8 +527,8 @@ static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
 
 	run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 
-	err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
-	CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
+	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
 }
 
 static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
index
15cb554a66d8c8504bb6e98ce13a7ead7ae533ba..172c586b69969b29d1e67c108f9649ea69c1ea1f 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c @@ -1,9 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Test that the flow_dissector program can be updated with a single - * syscall by attaching a new program that replaces the existing one. - * - * Corner case - the same program cannot be attached twice. + * Tests for attaching, detaching, and replacing flow_dissector BPF program. */ #define _GNU_SOURCE @@ -116,7 +113,7 @@ static void test_prog_attach_prog_attach(int netns, int prog1, int prog2) CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2)); out_detach: - err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR); + err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR); if (CHECK_FAIL(err)) perror("bpf_prog_detach"); CHECK_FAIL(prog_is_attached(netns)); @@ -152,7 +149,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2) DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts); int err, link; - err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0); + err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0); if (CHECK_FAIL(err)) { perror("bpf_prog_attach(prog1)"); return; @@ -168,7 +165,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2) close(link); CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); - err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR); + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); if (CHECK_FAIL(err)) perror("bpf_prog_detach"); CHECK_FAIL(prog_is_attached(netns)); @@ -188,7 +185,7 @@ static void test_link_create_prog_attach(int netns, int prog1, int prog2) /* Expect failure attaching prog when link exists */ errno = 0; - err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0); + err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0); if (CHECK_FAIL(!err || errno != EEXIST)) perror("bpf_prog_attach(prog2) expected EEXIST"); CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); @@ -211,7 +208,7 @@ static void test_link_create_prog_detach(int netns, int prog1, int prog2) /* Expect failure detaching prog when link exists */ errno = 0; - err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR); + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); if (CHECK_FAIL(!err || errno != EINVAL)) perror("bpf_prog_detach expected EINVAL"); CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); @@ -231,7 +228,7 @@ static void test_prog_attach_detach_query(int netns, int prog1, int prog2) } CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); - err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR); + err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR); if (CHECK_FAIL(err)) { perror("bpf_prog_detach"); return; @@ -308,6 +305,31 @@ static void test_link_update_replace_old_prog(int netns, int prog1, int prog2) CHECK_FAIL(prog_is_attached(netns)); } +static void test_link_update_same_prog(int netns, int prog1, int prog2) +{ + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts); + int err, link; + + link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts); + if (CHECK_FAIL(link < 0)) { + perror("bpf_link_create(prog1)"); + return; + } + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + /* Expect success updating the prog with the same one */ + update_opts.flags = 0; + update_opts.old_prog_fd = 0; + err = bpf_link_update(link, 
prog1, &update_opts); + if (CHECK_FAIL(err)) + perror("bpf_link_update"); + CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1)); + + close(link); + CHECK_FAIL(prog_is_attached(netns)); +} + static void test_link_update_invalid_opts(int netns, int prog1, int prog2) { DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts); @@ -571,6 +593,8 @@ static void run_tests(int netns) test_link_update_no_old_prog }, { "link update with replace old prog", test_link_update_replace_old_prog }, + { "link update with same prog", + test_link_update_same_prog }, { "link update invalid opts", test_link_update_invalid_opts }, { "link update invalid prog", diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c index e7b8753eac0b14e73bad2f95aa6c3b82f8efe645..75ecf956a2df9bc7101c7a73cfcae7a4b947b0d6 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -25,7 +25,7 @@ struct bpf_iter__netlink { struct netlink_sock *sk; } __attribute__((preserve_access_index)); -static inline struct inode *SOCK_INODE(struct socket *socket) +static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket) { return &container_of(socket, struct socket_alloc, socket)->vfs_inode; } diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c index 9365b686f84bc8ef7f15ec8b57f10a5c0af6a19d..5f645fdaba6f50a44cfd2a2ff49f37c34fdd52fa 100644 --- a/tools/testing/selftests/bpf/progs/fentry_test.c +++ b/tools/testing/selftests/bpf/progs/fentry_test.c @@ -55,3 +55,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f) e == (void *)20 && f == 21; return 0; } + +struct bpf_fentry_test_t { + struct bpf_fentry_test_t *a; +}; + +__u64 test7_result = 0; +SEC("fentry/bpf_fentry_test7") +int BPF_PROG(test7, struct bpf_fentry_test_t *arg) +{ + if (arg == 0) + test7_result = 1; + return 0; +} + +__u64 test8_result = 0; +SEC("fentry/bpf_fentry_test8") +int BPF_PROG(test8, struct bpf_fentry_test_t *arg) +{ + if (arg->a == 0) + test8_result = 1; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c index bd1e17d8024ce7eee03782edd86372ae01117e51..0952affb22a68526eb57a416614a8f0705cf9649 100644 --- a/tools/testing/selftests/bpf/progs/fexit_test.c +++ b/tools/testing/selftests/bpf/progs/fexit_test.c @@ -56,3 +56,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret) e == (void *)20 && f == 21 && ret == 111; return 0; } + +struct bpf_fentry_test_t { + struct bpf_fentry_test *a; +}; + +__u64 test7_result = 0; +SEC("fexit/bpf_fentry_test7") +int BPF_PROG(test7, struct bpf_fentry_test_t *arg) +{ + if (arg == 0) + test7_result = 1; + return 0; +} + +__u64 test8_result = 0; +SEC("fexit/bpf_fentry_test8") +int BPF_PROG(test8, struct bpf_fentry_test_t *arg) +{ + if (arg->a == 0) + test8_result = 1; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 057036ca111179fa737c4c670e4d745536598a2d..3dca4c2e24185480540fd7c283148b28ee6a08d2 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -79,7 +79,7 @@ struct { struct { __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(max_entries, 2); + __uint(max_entries, 3); __type(key, int); __type(value, int); } sock_skb_opts 
SEC(".maps");
@@ -94,6 +94,12 @@ struct {
 SEC("sk_skb1")
 int bpf_prog1(struct __sk_buff *skb)
 {
+	int *f, two = 2;
+
+	f = bpf_map_lookup_elem(&sock_skb_opts, &two);
+	if (f && *f) {
+		return *f;
+	}
 	return skb->len;
 }
 
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
index 330811260123260949b58351176df511c887d226..0ac08649772228ebd7846e6a543ce78bbc25168f 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
@@ -27,7 +27,7 @@ int xdp_dummy_prog(struct xdp_md *ctx)
 /* valid program on DEVMAP entry via SEC name;
  * has access to egress and ingress ifindex
  */
-SEC("xdp_devmap")
+SEC("xdp_devmap/map_prog")
 int xdp_dummy_dm(struct xdp_md *ctx)
 {
 	char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 6a12a0e01e0731a390f22963954feabce648f0c2..754cf611723ee53f759bcf7a88f989033431ae1a 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -789,19 +789,19 @@ static void test_sockmap(unsigned int tasks, void *data)
 	}
 
 	err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
-	if (err) {
+	if (!err) {
 		printf("Failed empty parser prog detach\n");
 		goto out_sockmap;
 	}
 
 	err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
-	if (err) {
+	if (!err) {
 		printf("Failed empty verdict prog detach\n");
 		goto out_sockmap;
 	}
 
 	err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
-	if (err) {
+	if (!err) {
 		printf("Failed empty msg verdict prog detach\n");
 		goto out_sockmap;
 	}
@@ -1090,19 +1090,19 @@ static void test_sockmap(unsigned int tasks, void *data)
 		assert(status == 0);
 	}
 
-	err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
+	err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
 	if (!err) {
 		printf("Detached an invalid prog type.\n");
 		goto out_sockmap;
 	}
 
-	err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
+	err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
 	if (err) {
 		printf("Failed parser prog detach\n");
 		goto out_sockmap;
 	}
 
-	err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
+	err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
 	if (err) {
 		printf("Failed parser prog detach\n");
 		goto out_sockmap;
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index 8294ae3ffb3cbe4197a9c2b90bc531a41b903fe6..43c9cda199b82b69ceda9eabacc25527b57516cd 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -318,6 +318,9 @@ class DebugfsDir:
                 continue
 
             if os.path.isfile(p):
+                # We need to init trap_flow_action_cookie before reading it
+                if f == "trap_flow_action_cookie":
+                    cmd('echo deadbeef > %s/%s' % (path, f))
                 _, out = cmd('cat %s/%s' % (path, f))
                 dfs[f] = out.strip()
             elif os.path.isdir(p):
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 37695fc8096ad622990b034749416b6234daf58e..78789b27e5730267d7b54ad8e1679aac3b35b6b1 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -85,6 +85,7 @@ int txmsg_ktls_skb_drop;
 int txmsg_ktls_skb_redir;
 int ktls;
 int peek_flag;
+int skb_use_parser;
 
 static const struct option long_options[] = {
 	{"help", no_argument, NULL, 'h' },
@@ -174,6 +175,7 @@ static void test_reset(void)
 	txmsg_apply = txmsg_cork = 0;
 	txmsg_ingress = txmsg_redir_skb = 0;
 	txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
+	skb_use_parser = 0;
 }
 
 static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
@@ -1211,6 +1213,11 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
 		}
 	}
 
+	if (skb_use_parser) {
+		i = 2;
+		err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
+	}
+
 	if (txmsg_drop)
 		options->drop_expected = true;
 
@@ -1650,6 +1657,16 @@ static void test_txmsg_cork(int cgrp, struct sockmap_options *opt)
 	test_send(opt, cgrp);
 }
 
+static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
+{
+	txmsg_pass = 1;
+	skb_use_parser = 512;
+	opt->iov_length = 256;
+	opt->iov_count = 1;
+	opt->rate = 2;
+	test_exec(cgrp, opt);
+}
+
 char *map_names[] = {
 	"sock_map",
 	"sock_map_txmsg",
@@ -1748,6 +1765,7 @@ struct _test test[] = {
 	{"txmsg test pull-data", test_txmsg_pull},
 	{"txmsg test pop-data", test_txmsg_pop},
 	{"txmsg test push/pop data", test_txmsg_push_pop},
+	{"txmsg test ingress parser", test_txmsg_ingress_parser},
 };
 
 static int check_whitelist(struct _test *t, struct sockmap_options *opt)
diff --git a/tools/testing/selftests/bpf/verifier/event_output.c b/tools/testing/selftests/bpf/verifier/event_output.c
index 99f8f582c02bd1b4edd422d89c89f80694048453..c5e805980409c4b3f8a19869057e75a5df20db23 100644
--- a/tools/testing/selftests/bpf/verifier/event_output.c
+++ b/tools/testing/selftests/bpf/verifier/event_output.c
@@ -112,6 +112,7 @@
 	"perfevent for cgroup sockopt",
 	.insns = { __PERF_EVENT_INSNS__ },
 	.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
+	.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
 	.fixup_map_event_output = { 4 },
 	.result = ACCEPT,
 	.retval = 1,
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
index 3702dbcc90a773d1e4d87aae2ce52ce9a9289701..c82aa77958e50a14b014435d8b5ace0867409019 100755
--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -63,6 +63,8 @@ ALL_TESTS="$ALL_TESTS 0008:150:1"
 ALL_TESTS="$ALL_TESTS 0009:150:1"
 ALL_TESTS="$ALL_TESTS 0010:1:1"
 ALL_TESTS="$ALL_TESTS 0011:1:1"
+ALL_TESTS="$ALL_TESTS 0012:1:1"
+ALL_TESTS="$ALL_TESTS 0013:1:1"
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -470,6 +472,38 @@ kmod_test_0011()
 	echo "$MODPROBE" > /proc/sys/kernel/modprobe
 }
 
+kmod_check_visibility()
+{
+	local name="$1"
+	local cmd="$2"
+
+	modprobe $DEFAULT_KMOD_DRIVER
+
+	local priv=$(eval $cmd)
+	local unpriv=$(capsh --drop=CAP_SYSLOG -- -c "$cmd")
+
+	if [ "$priv" = "$unpriv" ] || \
+	   [ "${priv:0:3}" = "0x0" ] || \
+	   [ "${unpriv:0:3}" != "0x0" ] ; then
+		echo "${FUNCNAME[0]}: FAIL, $name visible to unpriv: '$priv' vs '$unpriv'" >&2
+		exit 1
+	else
+		echo "${FUNCNAME[0]}: OK!"
+ fi +} + +kmod_test_0012() +{ + kmod_check_visibility /proc/modules \ + "grep '^${DEFAULT_KMOD_DRIVER}\b' /proc/modules | awk '{print \$NF}'" +} + +kmod_test_0013() +{ + kmod_check_visibility '/sys/module/*/sections/*' \ + "cat /sys/module/${DEFAULT_KMOD_DRIVER}/sections/.*text | head -n1" +} + list_tests() { echo "Test ID list:" @@ -489,6 +523,8 @@ list_tests() echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()" echo "0010 x $(get_test_count 0010) - test nonexistent modprobe path" echo "0011 x $(get_test_count 0011) - test completely disabling module autoloading" + echo "0012 x $(get_test_count 0012) - test /proc/modules address visibility under CAP_SYSLOG" + echo "0013 x $(get_test_count 0013) - test /sys/module/*/sections/* visibility under CAP_SYSLOG" } usage() diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index 0ac49d91a26023c3eb10804f4cf77fc004b4d080..862eee73455388c8c4c5d9c962f95a8438dfa781 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h @@ -36,7 +36,7 @@ struct ksft_count { static struct ksft_count ksft_cnt; static unsigned int ksft_plan; -static inline int ksft_test_num(void) +static inline unsigned int ksft_test_num(void) { return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail + ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass + diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c index 54cdefdfb49de2bb11f3e814a527cf4d0aa5dbba..d59f3eb67c8f55a075fe05c055e8018a72257db2 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c @@ -76,10 +76,8 @@ void set_default_state(struct kvm_nested_state *state) void set_default_vmx_state(struct kvm_nested_state *state, int size) { memset(state, 0, size); - state->flags = KVM_STATE_NESTED_GUEST_MODE | - KVM_STATE_NESTED_RUN_PENDING; if (have_evmcs) - state->flags |= KVM_STATE_NESTED_EVMCS; + state->flags = KVM_STATE_NESTED_EVMCS; state->format = 0; state->size = size; state->hdr.vmx.vmxon_pa = 0x1000; @@ -148,6 +146,11 @@ void test_vmx_nested_state(struct kvm_vm *vm) state->hdr.vmx.smm.flags = 1; test_nested_state_expect_einval(vm, state); + /* Invalid flags are rejected. */ + set_default_vmx_state(state, state_sz); + state->hdr.vmx.flags = ~0; + test_nested_state_expect_einval(vm, state); + /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */ set_default_vmx_state(state, state_sz); state->hdr.vmx.vmxon_pa = -1ull; @@ -185,20 +188,41 @@ void test_vmx_nested_state(struct kvm_vm *vm) state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE; test_nested_state_expect_einval(vm, state); - /* Size must be large enough to fit kvm_nested_state and vmcs12. */ + /* + * Size must be large enough to fit kvm_nested_state and vmcs12 + * if VMCS12 physical address is set + */ set_default_vmx_state(state, state_sz); state->size = sizeof(*state); + state->flags = 0; + test_nested_state_expect_einval(vm, state); + + set_default_vmx_state(state, state_sz); + state->size = sizeof(*state); + state->flags = 0; + state->hdr.vmx.vmcs12_pa = -1; test_nested_state(vm, state); - /* vmxon_pa cannot be the same address as vmcs_pa. */ + /* + * KVM_SET_NESTED_STATE succeeds with invalid VMCS + * contents but L2 not running. 
+ */ set_default_vmx_state(state, state_sz); - state->hdr.vmx.vmxon_pa = 0; - state->hdr.vmx.vmcs12_pa = 0; + state->flags = 0; + test_nested_state(vm, state); + + /* Invalid flags are rejected, even if no VMCS loaded. */ + set_default_vmx_state(state, state_sz); + state->size = sizeof(*state); + state->flags = 0; + state->hdr.vmx.vmcs12_pa = -1; + state->hdr.vmx.flags = ~0; test_nested_state_expect_einval(vm, state); - /* The revision id for vmcs12 must be VMCS12_REVISION. */ + /* vmxon_pa cannot be the same address as vmcs_pa. */ set_default_vmx_state(state, state_sz); - set_revision_id_for_vmcs12(state, 0); + state->hdr.vmx.vmxon_pa = 0; + state->hdr.vmx.vmcs12_pa = 0; test_nested_state_expect_einval(vm, state); /* diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh index 9dc35a16e4159685fb58ec11e3863afc906ab2cf..51df5e305855a7aa3c7ca61782697a3a5d6bb617 100755 --- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh +++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh @@ -144,7 +144,7 @@ setup() cleanup() { - for n in h1 r1 h2 h3 h4 + for n in h0 r1 h1 h2 h3 do ip netns del ${n} 2>/dev/null done diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index dee567f7576abdff8bc83112ec5131452fff6727..22dc2f3d428bab3c00a299aa589a6edd794ea3db 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -747,6 +747,19 @@ ipv6_fcnal_runtime() run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1" run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81" + # rpfilter and default route + $IP nexthop flush >/dev/null 2>&1 + run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP" + run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1" + run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3" + run_cmd "$IP nexthop add id 93 group 91/92" + run_cmd "$IP -6 ro add default nhid 91" + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" + log_test $? 0 "Nexthop with default route and rpfilter" + run_cmd "$IP -6 ro replace default nhid 93" + run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1" + log_test $? 0 "Nexthop with multipath default route and rpfilter" + # TO-DO: # existing route with old nexthop; append route with new nexthop # existing route with old nexthop; replace route with new diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index eb8e2a23bbb4c7d64dc4473db309024c20124b86..43a948feed2652b359aee93f82f39b6d11304bfc 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -252,8 +252,6 @@ check_highest_speed_is_chosen() fi local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1)) - # Remove the first speed, h1 does not advertise this speed. 
- unset speeds_arr[0] max_speed=${speeds_arr[0]} for current in ${speeds_arr[@]}; do diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh index 15d3489ecd9cee13247d06cc5eaf39967a752785..ceb7ad4dbd945f38f115010949a335fd982e9e03 100755 --- a/tools/testing/selftests/net/ip_defrag.sh +++ b/tools/testing/selftests/net/ip_defrag.sh @@ -6,6 +6,8 @@ set +x set -e +modprobe -q nf_defrag_ipv6 + readonly NETNS="ns-$(mktemp -u XXXXXX)" setup() { diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index 8c8c7d79c38d9c2df1ca4ff2cbb6d3b827e03598..2c522f7a0aeca137073de461f9f884cf8564eb77 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off, int fds[2], fds_udp[2][2], ret; fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n", - typeflags, PORT_BASE, PORT_BASE + port_off); + typeflags, (uint16_t)PORT_BASE, + (uint16_t)(PORT_BASE + port_off)); fds[0] = sock_fanout_open(typeflags, 0); fds[1] = sock_fanout_open(typeflags, 0); diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c index 422e7761254dee75c99f72693fe3ef40079cb7bd..bcb79ba1f2143e1569fd96943170c357044f1d38 100644 --- a/tools/testing/selftests/net/rxtimestamp.c +++ b/tools/testing/selftests/net/rxtimestamp.c @@ -329,8 +329,7 @@ int main(int argc, char **argv) bool all_tests = true; int arg_index = 0; int failures = 0; - int s, t; - char opt; + int s, t, opt; while ((opt = getopt_long(argc, argv, "", long_options, &arg_index)) != -1) { diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c index ceaad78e9667458b76d297debce723c75a3416c8..3155fbbf644b0c1f4dffe8477360116b0ca89153 100644 --- a/tools/testing/selftests/net/so_txtime.c +++ b/tools/testing/selftests/net/so_txtime.c @@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts) if (rbuf[0] != ts->data) error(1, 0, "payload mismatch. expected %c", ts->data); - if (labs(tstop - texpect) > cfg_variance_us) + if (llabs(tstop - texpect) > cfg_variance_us) error(1, 0, "exceeds variance (%d us)", cfg_variance_us); return false; diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c index 4555f88252bafd31d6c225590316f03b08d3b132..a61b7b3da5496285876b0e16b18a3060850b0803 100644 --- a/tools/testing/selftests/net/tcp_mmap.c +++ b/tools/testing/selftests/net/tcp_mmap.c @@ -344,7 +344,7 @@ int main(int argc, char *argv[]) { struct sockaddr_storage listenaddr, addr; unsigned int max_pacing_rate = 0; - size_t total = 0; + uint64_t total = 0; char *host = NULL; int fd, c, on = 1; char *buffer; @@ -473,12 +473,12 @@ int main(int argc, char *argv[]) zflg = 0; } while (total < FILE_SZ) { - ssize_t wr = FILE_SZ - total; + int64_t wr = FILE_SZ - total; if (wr > chunk_size) wr = chunk_size; /* Note : we just want to fill the pipe with 0 bytes */ - wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0); + wr = send(fd, buffer, (size_t)wr, zflg ? 
MSG_ZEROCOPY : 0); if (wr <= 0) break; total += wr; diff --git a/tools/testing/selftests/net/txtimestamp.sh b/tools/testing/selftests/net/txtimestamp.sh index eea6f5193693f633cccfb722a7d6f33f654d8b2c..31637769f59f607377532695d7e1c1d1ae96d9eb 100755 --- a/tools/testing/selftests/net/txtimestamp.sh +++ b/tools/testing/selftests/net/txtimestamp.sh @@ -75,7 +75,7 @@ main() { fi } -if [[ "$(ip netns identify)" == "root" ]]; then +if [[ -z "$(ip netns identify)" ]]; then ./in_netns.sh $0 $@ else main $@ diff --git a/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c index 6ee0fded03911011c9445f59bea232d554e9989c..7c23d3dd7d6d91f574449cd694ff7efc3f0625f7 100644 --- a/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c +++ b/tools/testing/selftests/powerpc/nx-gzip/gunz_test.c @@ -698,13 +698,13 @@ int decompress_file(int argc, char **argv, void *devhandle) switch (cc) { - case ERR_NX_TRANSLATION: + case ERR_NX_AT_FAULT: /* We touched the pages ahead of time. In the most common case * we shouldn't be here. But may be some pages were paged out. * Kernel should have placed the faulting address to fsaddr. */ - NXPRT(fprintf(stderr, "ERR_NX_TRANSLATION %p\n", + NXPRT(fprintf(stderr, "ERR_NX_AT_FAULT %p\n", (void *)cmdp->crb.csb.fsaddr)); if (pgfault_retries == NX_MAX_FAULTS) { diff --git a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c index 7496a83f9c9d78b510cb3e14cf80fc9ee6bda860..02dffb65de48b6001176385fd3a4577de1a69ee5 100644 --- a/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c +++ b/tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c @@ -306,13 +306,13 @@ int compress_file(int argc, char **argv, void *handle) lzcounts, cmdp, handle); if (cc != ERR_NX_OK && cc != ERR_NX_TPBC_GT_SPBC && - cc != ERR_NX_TRANSLATION) { + cc != ERR_NX_AT_FAULT) { fprintf(stderr, "nx error: cc= %d\n", cc); exit(-1); } /* Page faults are handled by the user code */ - if (cc == ERR_NX_TRANSLATION) { + if (cc == ERR_NX_AT_FAULT) { NXPRT(fprintf(stderr, "page fault: cc= %d, ", cc)); NXPRT(fprintf(stderr, "try= %d, fsa= %08llx\n", fault_tries, diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh index 1334e301d2a0390e5f58d7a3f764c56c3a672ba9..3e5ff29ee1dd9f9e14c31b02ca1859145b1afd76 100755 --- a/tools/testing/selftests/tpm2/test_smoke.sh +++ b/tools/testing/selftests/tpm2/test_smoke.sh @@ -6,5 +6,5 @@ ksft_skip=4 [ -e /dev/tpm0 ] || exit $ksft_skip -python -m unittest -v tpm2_tests.SmokeTest -python -m unittest -v tpm2_tests.AsyncTest +python3 -m unittest -v tpm2_tests.SmokeTest +python3 -m unittest -v tpm2_tests.AsyncTest diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh index 00259cb746cf6288323850d104f30a31caa6203a..04c47b13fe8ac8cb27c242ea9a8f2c5eebf7e3e7 100755 --- a/tools/testing/selftests/tpm2/test_space.sh +++ b/tools/testing/selftests/tpm2/test_space.sh @@ -6,4 +6,4 @@ ksft_skip=4 [ -e /dev/tpmrm0 ] || exit $ksft_skip -python -m unittest -v tpm2_tests.SpaceTest +python3 -m unittest -v tpm2_tests.SpaceTest diff --git a/tools/testing/selftests/tpm2/tpm2.py b/tools/testing/selftests/tpm2/tpm2.py index d0fcb66a88a68a0bf9e57d5da24aadfd695c12f9..f34486cd7342d51a9cfddab193ca01243fb0b2e4 100644 --- a/tools/testing/selftests/tpm2/tpm2.py +++ b/tools/testing/selftests/tpm2/tpm2.py @@ -247,14 +247,14 @@ class ProtocolError(Exception): class AuthCommand(object): """TPMS_AUTH_COMMAND""" - def __init__(self, 
session_handle=TPM2_RS_PW, nonce='', session_attributes=0, - hmac=''): + def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(), + session_attributes=0, hmac=bytes()): self.session_handle = session_handle self.nonce = nonce self.session_attributes = session_attributes self.hmac = hmac - def __str__(self): + def __bytes__(self): fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac)) return struct.pack(fmt, self.session_handle, len(self.nonce), self.nonce, self.session_attributes, len(self.hmac), @@ -268,11 +268,11 @@ class AuthCommand(object): class SensitiveCreate(object): """TPMS_SENSITIVE_CREATE""" - def __init__(self, user_auth='', data=''): + def __init__(self, user_auth=bytes(), data=bytes()): self.user_auth = user_auth self.data = data - def __str__(self): + def __bytes__(self): fmt = '>H%us H%us' % (len(self.user_auth), len(self.data)) return struct.pack(fmt, len(self.user_auth), self.user_auth, len(self.data), self.data) @@ -296,8 +296,9 @@ class Public(object): return '>HHIH%us%usH%us' % \ (len(self.auth_policy), len(self.parameters), len(self.unique)) - def __init__(self, object_type, name_alg, object_attributes, auth_policy='', - parameters='', unique=''): + def __init__(self, object_type, name_alg, object_attributes, + auth_policy=bytes(), parameters=bytes(), + unique=bytes()): self.object_type = object_type self.name_alg = name_alg self.object_attributes = object_attributes @@ -305,7 +306,7 @@ class Public(object): self.parameters = parameters self.unique = unique - def __str__(self): + def __bytes__(self): return struct.pack(self.__fmt(), self.object_type, self.name_alg, @@ -343,7 +344,7 @@ def get_algorithm(name): def hex_dump(d): d = [format(ord(x), '02x') for x in d] - d = [d[i: i + 16] for i in xrange(0, len(d), 16)] + d = [d[i: i + 16] for i in range(0, len(d), 16)] d = [' '.join(x) for x in d] d = os.linesep.join(d) @@ -401,7 +402,7 @@ class Client: pcrsel_len = max((i >> 3) + 1, 3) pcrsel = [0] * pcrsel_len pcrsel[i >> 3] = 1 << (i & 7) - pcrsel = ''.join(map(chr, pcrsel)) + pcrsel = ''.join(map(chr, pcrsel)).encode() fmt = '>HII IHB%us' % (pcrsel_len) cmd = struct.pack(fmt, @@ -443,7 +444,7 @@ class Client: TPM2_CC_PCR_EXTEND, i, len(auth_cmd), - str(auth_cmd), + bytes(auth_cmd), 1, bank_alg, dig) self.send_cmd(cmd) @@ -457,7 +458,7 @@ class Client: TPM2_RH_NULL, TPM2_RH_NULL, 16, - '\0' * 16, + ('\0' * 16).encode(), 0, session_type, TPM2_ALG_NULL, @@ -472,7 +473,7 @@ class Client: for i in pcrs: pcr = self.read_pcr(i, bank_alg) - if pcr == None: + if pcr is None: return None x += pcr @@ -489,7 +490,7 @@ class Client: pcrsel = [0] * pcrsel_len for i in pcrs: pcrsel[i >> 3] |= 1 << (i & 7) - pcrsel = ''.join(map(chr, pcrsel)) + pcrsel = ''.join(map(chr, pcrsel)).encode() fmt = '>HII IH%usIHB3s' % ds cmd = struct.pack(fmt, @@ -497,7 +498,8 @@ class Client: struct.calcsize(fmt), TPM2_CC_POLICY_PCR, handle, - len(dig), str(dig), + len(dig), + bytes(dig), 1, bank_alg, pcrsel_len, pcrsel) @@ -534,7 +536,7 @@ class Client: self.send_cmd(cmd) - def create_root_key(self, auth_value = ''): + def create_root_key(self, auth_value = bytes()): attributes = \ Public.FIXED_TPM | \ Public.FIXED_PARENT | \ @@ -570,11 +572,11 @@ class Client: TPM2_CC_CREATE_PRIMARY, TPM2_RH_OWNER, len(auth_cmd), - str(auth_cmd), + bytes(auth_cmd), len(sensitive), - str(sensitive), + bytes(sensitive), len(public), - str(public), + bytes(public), 0, 0) return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0] @@ -587,7 +589,7 @@ class Client: attributes = 0 if not policy_dig: attributes |= 
Public.USER_WITH_AUTH - policy_dig = '' + policy_dig = bytes() auth_cmd = AuthCommand() sensitive = SensitiveCreate(user_auth=auth_value, data=data) @@ -608,11 +610,11 @@ class Client: TPM2_CC_CREATE, parent_key, len(auth_cmd), - str(auth_cmd), + bytes(auth_cmd), len(sensitive), - str(sensitive), + bytes(sensitive), len(public), - str(public), + bytes(public), 0, 0) rsp = self.send_cmd(cmd) @@ -635,7 +637,7 @@ class Client: TPM2_CC_LOAD, parent_key, len(auth_cmd), - str(auth_cmd), + bytes(auth_cmd), blob) data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0] @@ -653,7 +655,7 @@ class Client: TPM2_CC_UNSEAL, data_handle, len(auth_cmd), - str(auth_cmd)) + bytes(auth_cmd)) try: rsp = self.send_cmd(cmd) @@ -675,7 +677,7 @@ class Client: TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET, TPM2_RH_LOCKOUT, len(auth_cmd), - str(auth_cmd)) + bytes(auth_cmd)) self.send_cmd(cmd) @@ -693,7 +695,7 @@ class Client: more_data, cap, cnt = struct.unpack('>BII', rsp[:9]) rsp = rsp[9:] - for i in xrange(0, cnt): + for i in range(0, cnt): handle = struct.unpack('>I', rsp[:4])[0] handles.append(handle) rsp = rsp[4:] diff --git a/tools/testing/selftests/tpm2/tpm2_tests.py b/tools/testing/selftests/tpm2/tpm2_tests.py index 728be7c69b764fe48592bd22e20d01a0fe19d68c..9d764306887b7008b5a75b2eab6584c2abb06e76 100644 --- a/tools/testing/selftests/tpm2/tpm2_tests.py +++ b/tools/testing/selftests/tpm2/tpm2_tests.py @@ -20,8 +20,8 @@ class SmokeTest(unittest.TestCase): self.client.close() def test_seal_with_auth(self): - data = 'X' * 64 - auth = 'A' * 15 + data = ('X' * 64).encode() + auth = ('A' * 15).encode() blob = self.client.seal(self.root_key, data, auth, None) result = self.client.unseal(self.root_key, blob, auth, None) @@ -30,8 +30,8 @@ class SmokeTest(unittest.TestCase): def test_seal_with_policy(self): handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL) - data = 'X' * 64 - auth = 'A' * 15 + data = ('X' * 64).encode() + auth = ('A' * 15).encode() pcrs = [16] try: @@ -58,14 +58,15 @@ class SmokeTest(unittest.TestCase): self.assertEqual(data, result) def test_unseal_with_wrong_auth(self): - data = 'X' * 64 - auth = 'A' * 20 + data = ('X' * 64).encode() + auth = ('A' * 20).encode() rc = 0 blob = self.client.seal(self.root_key, data, auth, None) try: - result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None) - except ProtocolError, e: + result = self.client.unseal(self.root_key, blob, + auth[:-1] + 'B'.encode(), None) + except ProtocolError as e: rc = e.rc self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL) @@ -73,8 +74,8 @@ class SmokeTest(unittest.TestCase): def test_unseal_with_wrong_policy(self): handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL) - data = 'X' * 64 - auth = 'A' * 17 + data = ('X' * 64).encode() + auth = ('A' * 17).encode() pcrs = [16] try: @@ -91,7 +92,7 @@ class SmokeTest(unittest.TestCase): # This should succeed. ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1) - self.client.extend_pcr(1, 'X' * ds) + self.client.extend_pcr(1, ('X' * ds).encode()) handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY) @@ -108,7 +109,7 @@ class SmokeTest(unittest.TestCase): # Then, extend a PCR that is part of the policy and try to unseal. # This should fail. 
- self.client.extend_pcr(16, 'X' * ds) + self.client.extend_pcr(16, ('X' * ds).encode()) handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY) @@ -119,7 +120,7 @@ class SmokeTest(unittest.TestCase): self.client.policy_password(handle) result = self.client.unseal(self.root_key, blob, auth, handle) - except ProtocolError, e: + except ProtocolError as e: rc = e.rc self.client.flush_context(handle) except: @@ -130,13 +131,13 @@ class SmokeTest(unittest.TestCase): def test_seal_with_too_long_auth(self): ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1) - data = 'X' * 64 - auth = 'A' * (ds + 1) + data = ('X' * 64).encode() + auth = ('A' * (ds + 1)).encode() rc = 0 try: blob = self.client.seal(self.root_key, data, auth, None) - except ProtocolError, e: + except ProtocolError as e: rc = e.rc self.assertEqual(rc, tpm2.TPM2_RC_SIZE) @@ -152,7 +153,7 @@ class SmokeTest(unittest.TestCase): 0xDEADBEEF) self.client.send_cmd(cmd) - except IOError, e: + except IOError as e: rejected = True except: pass @@ -212,7 +213,7 @@ class SmokeTest(unittest.TestCase): self.client.tpm.write(cmd) rsp = self.client.tpm.read() - except IOError, e: + except IOError as e: # read the response rsp = self.client.tpm.read() rejected = True @@ -283,7 +284,7 @@ class SpaceTest(unittest.TestCase): rc = 0 try: space1.send_cmd(cmd) - except ProtocolError, e: + except ProtocolError as e: rc = e.rc self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |