diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt index b0714d8f678..8dfb6a5f427 100644 --- a/Documentation/stable_kernel_rules.txt +++ b/Documentation/stable_kernel_rules.txt @@ -29,6 +29,9 @@ Rules on what kind of patches are accepted, and which ones are not, into the Procedure for submitting patches to the -stable tree: + - If the patch covers files in net/ or drivers/net please follow netdev stable + submission guidelines as described in + Documentation/networking/netdev-FAQ.txt - Send the patch, after verifying that it follows the above rules, to stable@vger.kernel.org. You must note the upstream commit ID in the changelog of your submission, as well as the kernel version you wish diff --git a/Makefile b/Makefile index 8e277aac874..386f32527ab 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 4 -SUBLEVEL = 103 +SUBLEVEL = 104 EXTRAVERSION = NAME = Saber-toothed Squirrel @@ -591,6 +591,10 @@ ifdef CONFIG_CC_GRAPHITE_OPTIMIZATION KBUILD_CFLAGS += -fgraphite-identity -floop-parallelize-all -ftree-loop-linear -floop-interchange -floop-strip-mine -floop-block endif +# conserve stack if available +# do this early so that an architecture can override it. 
+KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) + include $(srctree)/arch/$(SRCARCH)/Makefile #ifneq ($(CONFIG_FRAME_WARN),0) @@ -658,9 +662,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) -# conserve stack if available -KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) - # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) diff --git a/ak-mako-build.sh b/ak-gee-build.sh similarity index 83% rename from ak-mako-build.sh rename to ak-gee-build.sh index 59135a13304..679cb49ff01 100755 --- a/ak-mako-build.sh +++ b/ak-gee-build.sh @@ -10,15 +10,15 @@ clear # AK Kernel Version BASE_AK_VER="AK" -VER=".350.gee-aosp" +VER=".360.gee.aosp" AK_VER=$BASE_AK_VER$VER # AK Variables export LOCALVERSION="~"`echo $AK_VER` -export CROSS_COMPILE=${HOME}/kernel/AK-linaro/4.9.1-2014.07.20140718.CR83/bin/arm-cortex_a15-linux-gnueabihf- +export CROSS_COMPILE=${HOME}/kernel/linaro/arm-cortex_a15-linux-gnueabihf-linaro_4.9.2-2014.10/bin/arm-cortex_a15-linux-gnueabihf- export ARCH=arm export SUBARCH=arm -export KBUILD_BUILD_USER=Edgar +export KBUILD_BUILD_USER=Justin export KBUILD_BUILD_HOST="BuildBox" DATE_START=$(date +"%s") @@ -41,14 +41,14 @@ echo "Show: AK gee Settings" echo "------------------------" echo -e "${restore}" -MODULES_DIR=${HOME}/kernel/AK-anykernel/cwm/system/lib/modules +MODULES_DIR=${HOME}/kernel/AK-Mako-AnyKernel/cwm/system/lib/modules KERNEL_DIR=`pwd` -OUTPUT_DIR=${HOME}/kernel/AK-anykernel/zip -CWM_DIR=${HOME}/kernel/AK-anykernel/cwm -ZIMAGE_DIR=${HOME}/kernel/AK-xGenesis/arch/arm/boot +OUTPUT_DIR=${HOME}/kernel/AK-Mako-AnyKernel +CWM_DIR=${HOME}/kernel/AK-Mako-AnyKernel/cwm +ZIMAGE_DIR=${HOME}/kernel/AK-GEE/arch/arm/boot CWM_MOVE=${HOME}/kernel/AK-releases -ZIMAGE_ANYKERNEL=${HOME}/kernel/AK-anykernel/cwm/kernel -ANYKERNEL_DIR=${HOME}/kernel/AK-anykernel 
+ZIMAGE_ANYKERNEL=${HOME}/kernel/AK-Mako-AnyKernel/cwm/kernel +ANYKERNEL_DIR=${HOME}/kernel/AK-Mako-AnyKernel echo -e "${red}"; echo "COMPILING VERSION:"; echo -e "${blink_red}"; echo "$LOCALVERSION"; echo -e "${restore}" echo "CROSS_COMPILE="$CROSS_COMPILE diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 7a3d38d5ed6..5ebab5895ed 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -489,6 +489,11 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) } #endif +#define ioread16be(p) be16_to_cpu(ioread16(p)) +#define ioread32be(p) be32_to_cpu(ioread32(p)) +#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) +#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) + #define inb_p inb #define inw_p inw #define inl_p inl diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c index a0a5d27aa21..b8ce18f485d 100644 --- a/arch/alpha/oprofile/common.c +++ b/arch/alpha/oprofile/common.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "op_impl.h" diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ccfd79d751c..c531eaa7871 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -158,10 +158,8 @@ config ARM_TICKET_LOCKS default y if ARCH_MSM_SCORPIONMP || ARCH_MSM_KRAITMP depends on SMP -config RWSEM_GENERIC_SPINLOCK - bool - config RWSEM_XCHGADD_ALGORITHM + bool def_bool y config ARCH_HAS_ILOG2_U32 diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 609ce176bf6..be636b05cc5 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -41,6 +41,10 @@ ifeq ($(CONFIG_CC_STACKPROTECTOR),y) KBUILD_CFLAGS +=-fstack-protector endif +# ARM gcc developers unfortunately broke -fconserve-stack. 
It misaligns +# variables on the stack +KBUILD_CFLAGS += $(call cc-option,-fno-conserve-stack) + ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) KBUILD_CPPFLAGS += -mbig-endian AS += -EB diff --git a/arch/arm/configs/gee_defconfig b/arch/arm/configs/gee_defconfig index 20570bfaf73..f9bb07c8b3e 100644 --- a/arch/arm/configs/gee_defconfig +++ b/arch/arm/configs/gee_defconfig @@ -717,6 +717,8 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_SMARTMAX=y CONFIG_CPU_FREQ_GOV_WHEATLEY=y CONFIG_CPU_FREQ_GOV_LAZY=y +CONFIG_CPU_FREQ_GOV_INTELLIACTIVE=y +CONFIG_CPU_FREQ_GOV_INTELLIDEMAND=y # # ARM CPU frequency scaling drivers @@ -1294,8 +1296,10 @@ CONFIG_QSEECOM=y CONFIG_USB_HSIC_SMSC_HUB=y # CONFIG_BU52031NVX is not set # CONFIG_C2PORT is not set -# CONFIG_DOUBLETAP_WAKE is not set +CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE=y # CONFIG_TOUCH_WAKE is not set +CONFIG_TOUCHSCREEN_SWEEP2WAKE=y +CONFIG_TOUCHSCREEN_PREVENT_SLEEP=y # # EEPROM support diff --git a/arch/arm/configs/geeb_defconfig b/arch/arm/configs/geeb_defconfig index 0412e56d9da..648f2520bd6 100644 --- a/arch/arm/configs/geeb_defconfig +++ b/arch/arm/configs/geeb_defconfig @@ -1294,8 +1294,11 @@ CONFIG_QSEECOM=y CONFIG_USB_HSIC_SMSC_HUB=y # CONFIG_BU52031NVX is not set # CONFIG_C2PORT is not set -# CONFIG_DOUBLETAP_WAKE is not set +CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE=y # CONFIG_TOUCH_WAKE is not set +CONFIG_TOUCHSCREEN_SWEEP2WAKE=y +CONFIG_TOUCHSCREEN_PREVENT_SLEEP=y + # # EEPROM support diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 2312fe5b9b0..237acb91f7d 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -15,7 +15,10 @@ generic-y += local64.h generic-y += percpu.h generic-y += poll.h generic-y += resource.h +generic-y += rwsem.h generic-y += sections.h generic-y += siginfo.h generic-y += sizes.h +generic-y += termios.h generic-y += unaligned.h + diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 44d577f6293..d10ac8b47b4 100644 --- 
a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -3,6 +3,8 @@ * * Copyright (C) 1995-1999 Russell King * + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -126,4 +128,6 @@ static inline void prefetch(const void *ptr) #endif +#include + #endif /* __ASM_ARM_PROCESSOR_H */ diff --git a/arch/arm/include/asm/relaxed.h b/arch/arm/include/asm/relaxed.h new file mode 100644 index 00000000000..378e48d99fe --- /dev/null +++ b/arch/arm/include/asm/relaxed.h @@ -0,0 +1,20 @@ +/* + * arm/include/asm/relaxed.h + * + * Copyright (c) 2014 NVIDIA Corporation. All rights reserved. + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ASM_RELAXED_H_ +#define _ASM_RELAXED_H_ + +#include + +#endif /*_ASM_RELAXED_H_*/ diff --git a/arch/arm/include/asm/termios.h b/arch/arm/include/asm/termios.h deleted file mode 100644 index 293e3f1bc3f..00000000000 --- a/arch/arm/include/asm/termios.h +++ /dev/null @@ -1,92 +0,0 @@ -#ifndef __ASM_ARM_TERMIOS_H -#define __ASM_ARM_TERMIOS_H - -#include -#include - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -#ifdef __KERNEL__ -/* intr=^C quit=^| erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" -#endif - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - -#ifdef __KERNEL__ - -/* - * Translate a "termio" structure into a "termios". Ugh. 
- */ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ - unsigned short __tmp; \ - get_user(__tmp,&(termio)->x); \ - *(unsigned short *) &(termios)->x = __tmp; \ -} - -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif /* __KERNEL__ */ - -#endif /* __ASM_ARM_TERMIOS_H */ diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 9a8531eadd3..9d95a46b25f 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -76,26 +76,21 @@ #ifndef CONFIG_THUMB2_KERNEL .macro svc_exit, rpsr msr spsr_cxsf, \rpsr -#if defined(CONFIG_CPU_V6) - ldr r0, [sp] - strex r1, r2, [sp] @ clear the exclusive monitor - ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr -#elif defined(CONFIG_CPU_32v6K) - clrex @ clear the exclusive monitor - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr -#else - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +#if 
defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) + @ We must avoid clrex due to Cortex-A15 erratum #830321 + sub r0, sp, #4 @ uninhabited address + strex r1, r2, [r0] @ clear the exclusive monitor #endif + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr .endm .macro restore_user_regs, fast = 0, offset = 0 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC]! @ get pc msr spsr_cxsf, r1 @ save in spsr_svc -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) + @ We must avoid clrex due to Cortex-A15 erratum #830321 strex r1, r2, [sp] @ clear the exclusive monitor -#elif defined(CONFIG_CPU_32v6K) - clrex @ clear the exclusive monitor #endif .if \fast ldmdb sp, {r1 - lr}^ @ get calling r1 - lr @@ -123,7 +118,10 @@ .macro svc_exit, rpsr ldr lr, [sp, #S_SP] @ top of the stack ldrd r0, r1, [sp, #S_LR] @ calling lr and pc - clrex @ clear the exclusive monitor + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor + stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context ldmia sp, {r0 - r12} mov sp, lr @@ -132,13 +130,16 @@ .endm .macro restore_user_regs, fast = 0, offset = 0 - clrex @ clear the exclusive monitor mov r2, sp load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC] @ get pc add sp, sp, #\offset + S_SP msr spsr_cxsf, r1 @ save in spsr_svc + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r1, r2, [sp] @ clear the exclusive monitor + .if \fast ldmdb sp, {r1 - r12} @ get calling r1 - r12 .else diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 532f2db5430..00c0e4004ad 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -365,12 +365,10 @@ static void show_data(unsigned long addr, int nbytes, const char *name) u32 *p; /* - * don't attempt to dump non-kernel addresses, values that are probably - * just small 
negative numbers, or vmalloc addresses that may point to - * memory-mapped peripherals + * don't attempt to dump non-kernel addresses or + * values that are probably just small negative numbers */ - if (addr < PAGE_OFFSET || addr > -256UL || - is_vmalloc_addr((void *)addr)) + if (addr < PAGE_OFFSET || addr > -256UL) return; printk("\n%s: %#lx:\n", name, addr); diff --git a/arch/arm/mach-msm/lge/mako/board-mako-display.c b/arch/arm/mach-msm/lge/mako/board-mako-display.c index 9ceffaf0a33..8dd483bc38f 100644 --- a/arch/arm/mach-msm/lge/mako/board-mako-display.c +++ b/arch/arm/mach-msm/lge/mako/board-mako-display.c @@ -251,7 +251,7 @@ static struct msm_bus_scale_pdata mdp_bus_scale_pdata = { static struct msm_panel_common_pdata mdp_pdata = { .gpio = MDP_VSYNC_GPIO, .mdp_max_clk = 266667000, - .mdp_max_bw = 3000000000u, + .mdp_max_bw = 2000000000UL, .mdp_bw_ab_factor = 115, .mdp_bw_ib_factor = 125, .mdp_bus_scale_table = &mdp_bus_scale_pdata, @@ -815,7 +815,7 @@ static char ief_setC[9] = {0xEC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} static char osc_setting[4] = {0xC0, 0x00, 0x0A, 0x10}; static char power_setting3[13] = {0xC3, 0x00, 0x88, 0x03, 0x20, 0x01, 0x57, 0x4F, 0x33,0x02,0x38,0x38,0x00}; -static char power_setting4[6] = {0xC4, 0x31, 0x24, 0x11, 0x11, 0x3D}; +static char power_setting4[6] = {0xC4, 0x22, 0x24, 0x11, 0x11, 0x3D}; static char power_setting5[4] = {0xC5, 0x3B, 0x3B, 0x03}; #ifdef CONFIG_LGIT_VIDEO_WXGA_CABC diff --git a/arch/arm/mach-msm/lge/mako/board-mako-pmic.c b/arch/arm/mach-msm/lge/mako/board-mako-pmic.c index e851c481211..04d45bfdfcf 100644 --- a/arch/arm/mach-msm/lge/mako/board-mako-pmic.c +++ b/arch/arm/mach-msm/lge/mako/board-mako-pmic.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * Copyright (c) 2012, LGE Inc. 
* * This program is free software; you can redistribute it and/or modify @@ -120,12 +120,8 @@ struct pm8xxx_mpp_init { /* Initial PM8921 GPIO configurations */ static struct pm8xxx_gpio_init pm8921_gpios[] __initdata = { -#ifdef CONFIG_EARJACK_DEBUGGER PM8921_GPIO_INPUT(13, PM_GPIO_PULL_DN), /* EARJACK_DEBUGGER */ -#endif -#ifdef CONFIG_SLIMPORT_ANX7808 PM8921_GPIO_INPUT(14, PM_GPIO_PULL_DN), /* SLIMPORT_CBL_DET */ -#endif PM8921_GPIO_OUTPUT(15, 0, HIGH), /* ANX_P_DWN_CTL */ PM8921_GPIO_OUTPUT(16, 0, HIGH), /* ANX_AVDD33_EN */ PM8921_GPIO_OUTPUT(17, 0, HIGH), /* CAM_VCM_EN */ @@ -199,12 +195,9 @@ static struct pm8xxx_misc_platform_data apq8064_pm8921_misc_pdata = { #define PM8921_LC_LED_MAX_CURRENT 4 /* I = 4mA */ #define PM8921_LC_LED_LOW_CURRENT 1 /* I = 1mA */ -#ifdef CONFIG_MACH_APQ8064_J1A -#define PM8921_KEY_LED_MAX_CURRENT 6 /* I = 6mA */ -#define PM8XXX_LED_PWM_DUTY_MS0 50 -#define PM8XXX_LED_PWM_DUTY_MS1 512 -#endif -#define PM8XXX_LED_PWM_ADJUST_BRIGHTNESS_E 10 /* max duty percentage */ +#define PM8921_KEY_LED_MAX_CURRENT 6 +#define PM8XXX_LED_PWM_DUTY_MS0 50 +#define PM8XXX_LED_PWM_DUTY_MS1 50 #define PM8XXX_LED_PWM_PERIOD 1000 #define PM8XXX_LED_PWM_DUTY_MS 50 #define PM8XXX_LED_PWM_DUTY_PCTS 16 @@ -218,40 +211,23 @@ static struct pm8xxx_misc_platform_data apq8064_pm8921_misc_pdata = { */ #define PM8XXX_PWM_CHANNEL_NONE -1 -#ifdef CONFIG_MACH_APQ8064_J1A -static struct led_info pm8921_led_info[] = { - [0] = { - .name = "led:red", - }, - [1] = { - .name = "button-backlight", - }, - [2] = { - .name = "led:green", - }, -}; -#else static struct led_info pm8921_led_info[] = { [0] = { .name = "red", }, [1] = { - .name = "green", + .name = "button-backlight", }, [2] = { - .name = "blue", + .name = "green", }, }; -#endif static struct led_platform_data pm8921_led_core_pdata = { .num_leds = ARRAY_SIZE(pm8921_led_info), .leds = pm8921_led_info, }; -#ifdef CONFIG_MACH_APQ8064_J1A - -#ifdef CONFIG_LGE_PM_PWM_LED static int pm8921_led0_pwm_duty_pcts0[60] = { 1, 2, 
8, 10, 14, 18, 20, 24, 30, 34, 36, 40, 42, 48, 50, 55, 58, 60, 62, 64, @@ -262,7 +238,7 @@ static int pm8921_led0_pwm_duty_pcts0[60] = { }; static int pm8921_led0_pwm_duty_pcts1[60] = { - 60, 80, 60, 0, 0, 0, 0, 0, 0, 0, + 60, 65, 70, 75, 80, 80, 75, 70, 65, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -279,37 +255,27 @@ static struct pm8xxx_pwm_duty_cycles pm8921_led0_pwm_duty_cycles = { .duty_ms1 = PM8XXX_LED_PWM_DUTY_MS1, .start_idx = 0, }; -#endif static struct pm8xxx_led_config pm8921_led_configs[] = { [0] = { .id = PM8XXX_ID_LED_0, -#ifdef CONFIG_LGE_PM_PWM_LED .mode = PM8XXX_LED_MODE_PWM2, .pwm_channel = 5, .pwm_period_us = PM8XXX_LED_PWM_PERIOD, .pwm_duty_cycles = &pm8921_led0_pwm_duty_cycles, -#else - .mode = PM8XXX_LED_MODE_MANUAL, -#endif .max_current = PM8921_LC_LED_MAX_CURRENT, }, [1] = { .id = PM8XXX_ID_LED_1, .mode = PM8XXX_LED_MODE_MANUAL, .max_current = PM8921_KEY_LED_MAX_CURRENT, - }, [2] = { .id = PM8XXX_ID_LED_2, -#ifdef CONFIG_LGE_PM_PWM_LED .mode = PM8XXX_LED_MODE_PWM1, .pwm_channel = 4, .pwm_period_us = PM8XXX_LED_PWM_PERIOD, .pwm_duty_cycles = &pm8921_led0_pwm_duty_cycles, -#else - .mode = PM8XXX_LED_MODE_MANUAL, -#endif .max_current = PM8921_LC_LED_MAX_CURRENT, }, }; @@ -318,84 +284,9 @@ static struct pm8xxx_led_platform_data apq8064_pm8921_leds_pdata = { .led_core = &pm8921_led_core_pdata, .configs = pm8921_led_configs, .num_configs = ARRAY_SIZE(pm8921_led_configs), + .use_pwm = 1, }; -#else - -static int pm8921_led0_pwm_duty_pcts[PM8XXX_LED_PWM_DUTY_PCTS] = {0,}; -static int pm8921_led1_pwm_duty_pcts[PM8XXX_LED_PWM_DUTY_PCTS] = {0,}; -static int pm8921_led2_pwm_duty_pcts[PM8XXX_LED_PWM_DUTY_PCTS] = {0,}; - -static struct pm8xxx_pwm_duty_cycles pm8921_led0_pwm_duty_cycles = { - .duty_pcts = (int *)&pm8921_led0_pwm_duty_pcts, - .num_duty_pcts = PM8XXX_LED_PWM_DUTY_PCTS, - .duty_ms = PM8XXX_LED_PWM_DUTY_MS, - .start_idx = PM8XXX_LED_PWM_START_IDX0, -}; - -static struct 
pm8xxx_pwm_duty_cycles pm8921_led1_pwm_duty_cycles = { - .duty_pcts = (int *)&pm8921_led1_pwm_duty_pcts, - .num_duty_pcts = PM8XXX_LED_PWM_DUTY_PCTS, - .duty_ms = PM8XXX_LED_PWM_DUTY_MS, - .start_idx = PM8XXX_LED_PWM_START_IDX1, -}; - -static struct pm8xxx_pwm_duty_cycles pm8921_led2_pwm_duty_cycles = { - .duty_pcts = (int *)&pm8921_led2_pwm_duty_pcts, - .num_duty_pcts = PM8XXX_LED_PWM_DUTY_PCTS, - .duty_ms = PM8XXX_LED_PWM_DUTY_MS, - .start_idx = PM8XXX_LED_PWM_START_IDX2, -}; - -static struct pm8xxx_led_config pm8921_led_configs[] = { - [0] = { - .id = PM8XXX_ID_LED_0, - .mode = PM8XXX_LED_MODE_PWM3, - .max_current = PM8921_LC_LED_MAX_CURRENT, - .pwm_channel = 6, - .pwm_period_us = PM8XXX_LED_PWM_PERIOD, - .pwm_duty_cycles = &pm8921_led0_pwm_duty_cycles -// .pwm_adjust_brightness = 52, - }, - [1] = { - .id = PM8XXX_ID_LED_1, - .mode = PM8XXX_LED_MODE_PWM2, - .max_current = PM8921_LC_LED_MAX_CURRENT, - .pwm_channel = 5, - .pwm_period_us = PM8XXX_LED_PWM_PERIOD, - .pwm_duty_cycles = &pm8921_led1_pwm_duty_cycles -// .pwm_adjust_brightness = 43, - }, - [2] = { - .id = PM8XXX_ID_LED_2, - .mode = PM8XXX_LED_MODE_PWM1, - .max_current = PM8921_LC_LED_MAX_CURRENT, - .pwm_channel = 4, - .pwm_period_us = PM8XXX_LED_PWM_PERIOD, - .pwm_duty_cycles = &pm8921_led2_pwm_duty_cycles -// .pwm_adjust_brightness = 75, - }, -}; - -static __init void mako_fixed_leds(void) { -/* - if (lge_get_board_revno() <= HW_REV_E) { - int i = 0; - for (i = 0; i < ARRAY_SIZE(pm8921_led_configs); i++) - pm8921_led_configs[i].pwm_adjust_brightness = - PM8XXX_LED_PWM_ADJUST_BRIGHTNESS_E; - } -*/ -} - -static struct pm8xxx_led_platform_data apq8064_pm8921_leds_pdata = { - .led_core = &pm8921_led_core_pdata, - .configs = pm8921_led_configs, - .num_configs = ARRAY_SIZE(pm8921_led_configs) -// .use_pwm = 1, -}; -#endif - static struct pm8xxx_adc_amux apq8064_pm8921_adc_channels_data[] = { {"vcoin", CHANNEL_VCOIN, CHAN_PATH_SCALING2, AMUX_RSV1, ADC_DECIMATION_TYPE2, ADC_SCALE_DEFAULT}, @@ -528,7 +419,7 @@ 
static int wireless_charger_is_plugged(void) } return !(gpio_get_value(wlc_active_n)); -}: +} static __init void mako_fixup_wlc_gpio(void) { if (lge_get_board_revno() >= HW_REV_1_1) @@ -912,13 +803,6 @@ void __init apq8064_init_pmic(void) mako_set_adcmap(); -#if !defined(CONFIG_MACH_APQ8064_J1A) - mako_fixed_leds(); -#endif -#ifdef CONFIG_WIRELESS_CHARGER - mako_fixup_wlc_gpio(); -#endif - apq8064_device_ssbi_pmic1.dev.platform_data = &apq8064_ssbi_pm8921_pdata; apq8064_device_ssbi_pmic2.dev.platform_data = diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c index de26ac752f8..32ea4dde767 100644 --- a/arch/arm/mach-msm/msm_bus/msm_bus_arb.c +++ b/arch/arm/mach-msm/msm_bus/msm_bus_arb.c @@ -576,7 +576,8 @@ int msm_bus_scale_client_update_request(uint32_t cl, unsigned index) pdata = client->pdata; if (!pdata) { MSM_BUS_ERR("Null pdata passed to update-request\n"); - return -ENXIO; + ret = -ENXIO; + goto err; } if (index >= pdata->num_usecases) { diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 80741992a9f..5d777a567c3 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -17,12 +17,6 @@ */ .align 5 ENTRY(v6_early_abort) -#ifdef CONFIG_CPU_V6 - sub r1, sp, #4 @ Get unused stack location - strex r0, r1, [r1] @ Clear the exclusive monitor -#elif defined(CONFIG_CPU_32v6K) - clrex -#endif mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR /* diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 703375277ba..4812ad05421 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -13,12 +13,6 @@ */ .align 5 ENTRY(v7_early_abort) - /* - * The effect of data aborts on on the exclusive access monitor are - * UNPREDICTABLE. 
Do a CLREX to clear the state - */ - clrex - mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index d3a9f012aa0..b6fb65030d5 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -265,6 +265,18 @@ static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id) } #endif +static char __read_mostly octeon_system_type[80]; + +static int __init init_octeon_system_type(void) +{ + snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)", + cvmx_board_type_to_string(octeon_bootinfo->board_type), + octeon_model_get_string(read_c0_prid())); + + return 0; +} +early_initcall(init_octeon_system_type); + /** * Return a string representing the system type * @@ -272,11 +284,7 @@ static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id) */ const char *octeon_board_type_string(void) { - static char name[80]; - sprintf(name, "%s (%s)", - cvmx_board_type_to_string(octeon_bootinfo->board_type), - octeon_model_get_string(read_c0_prid())); - return name; + return octeon_system_type; } const char *get_system_type(void) diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 811084f4e42..52f60e54872 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -162,11 +162,6 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters) return counters >> vpe_shift(); } -static unsigned int counters_per_cpu_to_total(unsigned int counters) -{ - return counters << vpe_shift(); -} - #else /* !CONFIG_MIPS_MT_SMP */ #define vpe_id() 0 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index bda8eb26ece..fdd6042843e 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -598,6 +599,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) /* Catch bad 
driver code */ BUG_ON(size == 0); + preempt_disable(); if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); @@ -618,6 +620,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) R4600_HIT_CACHEOP_WAR_IMPL; blast_dcache_range(addr, addr + size); } + preempt_enable(); bc_wback_inv(addr, size); __sync(); @@ -628,6 +631,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) /* Catch bad driver code */ BUG_ON(size == 0); + preempt_disable(); if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); @@ -663,6 +667,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask); blast_inv_dcache_range(addr, addr + size); } + preempt_enable(); bc_inv(addr, size); __sync(); diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S index 1088b5fca3b..27451968e45 100644 --- a/arch/openrisc/kernel/head.S +++ b/arch/openrisc/kernel/head.S @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index eeb8054c7cd..1f3e9ea1cf9 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -6,6 +6,7 @@ config UNICORE32 select HAVE_DMA_ATTRS select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 + select GENERIC_ATOMIC64 select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA select GENERIC_FIND_FIRST_BIT diff --git a/arch/unicore32/include/asm/bug.h b/arch/unicore32/include/asm/bug.h index b1ff8cadb08..93a56f3e234 100644 --- a/arch/unicore32/include/asm/bug.h +++ b/arch/unicore32/include/asm/bug.h @@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err); extern void uc32_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap); -extern asmlinkage void __backtrace(void); -extern asmlinkage void c_backtrace(unsigned long fp, int pmode); - -extern void 
__show_regs(struct pt_regs *); - #endif /* __UNICORE_BUG_H__ */ diff --git a/arch/unicore32/include/asm/cmpxchg.h b/arch/unicore32/include/asm/cmpxchg.h index df4d5acfd19..8e797ad4fa2 100644 --- a/arch/unicore32/include/asm/cmpxchg.h +++ b/arch/unicore32/include/asm/cmpxchg.h @@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, : "memory", "cc"); break; default: - ret = __xchg_bad_pointer(); + __xchg_bad_pointer(); } return ret; diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h index f23955028a1..30f749da8f7 100644 --- a/arch/unicore32/kernel/setup.h +++ b/arch/unicore32/kernel/setup.h @@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[]; extern void kernel_thread_helper(void); extern void __init early_signal_init(void); + +extern asmlinkage void __backtrace(void); +extern asmlinkage void c_backtrace(unsigned long fp, int pmode); + +extern void __show_regs(struct pt_regs *); + #endif diff --git a/arch/xtensa/include/asm/ioctls.h b/arch/xtensa/include/asm/ioctls.h index fd1d1369a40..96341aabceb 100644 --- a/arch/xtensa/include/asm/ioctls.h +++ b/arch/xtensa/include/asm/ioctls.h @@ -28,17 +28,17 @@ #define TCSETSW 0x5403 #define TCSETSF 0x5404 -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) +#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ +#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ +#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ +#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */ #define TCSBRK _IO('t', 29) #define TCXONC _IO('t', 30) #define TCFLSH _IO('t', 31) -#define TIOCSWINSZ _IOW('t', 103, struct winsize) -#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ +#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ #define 
TIOCSTART _IO('t', 110) /* start output, like ^Q */ #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ @@ -88,7 +88,6 @@ #define TIOCSETD _IOW('T', 35, int) #define TIOCGETD _IOR('T', 36, int) #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ -#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ @@ -111,8 +110,10 @@ #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ -#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ -#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ +#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */ + /* _IOR('T', 90, struct serial_multiport_struct) */ +#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */ + /* _IOW('T', 91, struct serial_multiport_struct) */ #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index b03c043ce75..7eeaf22fc7b 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -68,7 +68,12 @@ #define VMALLOC_START 0xC0000000 #define VMALLOC_END 0xC7FEFFFF #define TLBTEMP_BASE_1 0xC7FF0000 -#define TLBTEMP_BASE_2 0xC7FF8000 +#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) +#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE +#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) +#else +#define TLBTEMP_SIZE ICACHE_WAY_SIZE +#endif /* * Xtensa Linux config PTE layout (when 
present): diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 6223f3346b5..e01cffcc35d 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -1053,9 +1053,8 @@ ENTRY(fast_syscall_xtensa) movi a7, 4 # sizeof(unsigned int) access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp - addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 - _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill - _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp + _bgeui a6, SYS_XTENSA_COUNT, .Lill + _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp /* Fall through for ATOMIC_CMP_SWP. */ @@ -1067,27 +1066,26 @@ TRY s32i a5, a3, 0 # different, modify value l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 - addi a6, a6, 1 # restore a6 (really necessary?) rfe 1: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 0 # return 0 (note that we cannot set - addi a6, a6, 1 # restore a6 (really necessary?) rfe .Lnswp: /* Atomic set, add, and exg_add. */ TRY l32i a7, a3, 0 # orig + addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set + addi a6, a6, SYS_XTENSA_ATOMIC_SET TRY s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 l32i a7, a0, PT_AREG7 # restore a7 l32i a0, a0, PT_AREG0 # restore a0 - addi a6, a6, 1 # restore a6 (really necessary?) rfe CATCH @@ -1096,7 +1094,7 @@ CATCH movi a2, -EFAULT rfe -.Lill: l32i a7, a2, PT_AREG0 # restore a7 +.Lill: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EINVAL rfe @@ -1629,7 +1627,7 @@ ENTRY(fast_second_level_miss) rsr a0, EXCVADDR bltu a0, a3, 2f - addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) + addi a1, a0, -TLBTEMP_SIZE bgeu a1, a3, 2f /* Check if we have to restore an ITLB mapping. 
*/ diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 2783fda76dd..c055c91a96e 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -48,9 +48,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) /* We currently don't support coherent memory outside KSEG */ - if (ret < XCHAL_KSEG_CACHED_VADDR - || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || + ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); if (ret != 0) { @@ -66,10 +65,11 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { - long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; + unsigned long addr = (unsigned long)vaddr + + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; - if (addr < 0 || addr >= XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || + addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); free_pages(addr, get_order(size)); } diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index e265f835c95..19759d3df45 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, * Note: Original code is ata_bus_softreset(). 
*/ -static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, +static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) { struct ata_ioports *ioaddr = &ap->ioaddr; @@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, udelay(20); out_be32(ioaddr->ctl_addr, ap->ctl); - scc_wait_after_reset(&ap->link, devmask, deadline); - - return 0; + return scc_wait_after_reset(&ap->link, devmask, deadline); } /** @@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes, { struct ata_port *ap = link->ap; unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; - unsigned int devmask = 0, err_mask; + unsigned int devmask = 0; + int rc; u8 err; DPRINTK("ENTER\n"); @@ -635,9 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes, /* issue bus reset */ DPRINTK("about to softreset, devmask=%x\n", devmask); - err_mask = scc_bus_softreset(ap, devmask, deadline); - if (err_mask) { - ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", err_mask); + rc = scc_bus_softreset(ap, devmask, deadline); + if (rc) { + ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc); return -EIO; } diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 0ff5c2ea140..faa408f16f6 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -725,7 +725,7 @@ static int hpet_is_known(struct hpet_data *hdp) return 0; } -static ctl_table hpet_table[] = { +static struct ctl_table hpet_table[] = { { .procname = "max-user-freq", .data = &hpet_max_freq, @@ -736,7 +736,7 @@ static ctl_table hpet_table[] = { {} }; -static ctl_table hpet_root[] = { +static struct ctl_table hpet_root[] = { { .procname = "hpet", .maxlen = 0, @@ -746,7 +746,7 @@ static ctl_table hpet_root[] = { {} }; -static ctl_table dev_root[] = { +static struct ctl_table dev_root[] = { { .procname = "dev", .maxlen = 0, diff --git a/drivers/char/ipmi/ipmi_poweroff.c 
b/drivers/char/ipmi/ipmi_poweroff.c index 2efa176beab..9f2e3be2c5b 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -659,7 +659,7 @@ static struct ipmi_smi_watcher smi_watcher = { #ifdef CONFIG_PROC_FS #include -static ctl_table ipmi_table[] = { +static struct ctl_table ipmi_table[] = { { .procname = "poweroff_powercycle", .data = &poweroff_powercycle, .maxlen = sizeof(poweroff_powercycle), @@ -668,14 +668,14 @@ static ctl_table ipmi_table[] = { { } }; -static ctl_table ipmi_dir_table[] = { +static struct ctl_table ipmi_dir_table[] = { { .procname = "ipmi", .mode = 0555, .child = ipmi_table }, { } }; -static ctl_table ipmi_root_table[] = { +static struct ctl_table ipmi_root_table[] = { { .procname = "dev", .mode = 0555, .child = ipmi_dir_table }, diff --git a/drivers/char/random.c b/drivers/char/random.c index cdf4cfb2da4..7be362cd81c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -308,7 +308,7 @@ static int random_read_wakeup_thresh = 64; static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS; /* - * The minimum number of seconds between urandom pool resending. We + * The minimum number of seconds between urandom pool reseeding. We * do this to limit the amount of entropy that can be drained from the * input pool even if there are heavy demands on /dev/urandom. */ @@ -325,7 +325,7 @@ static int random_min_urandom_seed = 60; * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR * generators. ACM Transactions on Modeling and Computer Simulation * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted - * GFSR generators II. ACM Transactions on Mdeling and Computer + * GFSR generators II. ACM Transactions on Modeling and Computer * Simulation 4:254-266) * * Thanks to Colin Plumb for suggesting this. 
@@ -644,7 +644,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) } while (unlikely(entropy_count < pool_size-2 && pnfrac)); } - if (entropy_count < 0) { + if (unlikely(entropy_count < 0)) { pr_warn("random: negative entropy/overflow: pool %s count %d\n", r->name, entropy_count); WARN_ON(1); @@ -670,10 +670,10 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) r->entropy_total, _RET_IP_); if (r == &input_pool) { - int entropy_bytes = entropy_count >> ENTROPY_SHIFT; + int entropy_bits = entropy_count >> ENTROPY_SHIFT; /* should we wake readers? */ - if (entropy_bytes >= random_read_wakeup_thresh) { + if (entropy_bits >= random_read_wakeup_thresh) { wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } @@ -682,7 +682,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) * forth between them, until the output pools are 75% * full. */ - if (entropy_bytes > random_write_wakeup_thresh && + if (entropy_bits > random_write_wakeup_thresh && r->initialized && r->entropy_total >= 2*random_read_wakeup_thresh) { static struct entropy_store *last = &blocking_pool; @@ -976,45 +976,44 @@ static void push_to_pool(struct work_struct *work) static size_t account(struct entropy_store *r, size_t nbytes, int min, int reserved) { - unsigned long flags; - int wakeup_write = 0; - int have_bytes; int entropy_count, orig; - size_t ibytes; - - /* Hold lock while accounting */ - spin_lock_irqsave(&r->lock, flags); + size_t ibytes, nfrac; BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); /* Can we pull enough? 
*/ retry: entropy_count = orig = ACCESS_ONCE(r->entropy_count); - have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); ibytes = nbytes; - if (have_bytes < min + reserved) { - ibytes = 0; - } else { - /* If limited, never pull more than available */ - if (r->limit && ibytes + reserved >= have_bytes) - ibytes = have_bytes - reserved; - - if (have_bytes >= ibytes + reserved) - entropy_count -= ibytes << (ENTROPY_SHIFT + 3); - else - entropy_count = reserved << (ENTROPY_SHIFT + 3); + /* If limited, never pull more than available */ + if (r->limit) { + int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) - goto retry; + if ((have_bytes -= reserved) < 0) + have_bytes = 0; + ibytes = min_t(size_t, ibytes, have_bytes); + } + if (ibytes < min) + ibytes = 0; - if ((r->entropy_count >> ENTROPY_SHIFT) - < random_write_wakeup_thresh) - wakeup_write = 1; + if (unlikely(entropy_count < 0)) { + pr_warn("random: negative entropy count: pool %s count %d\n", + r->name, entropy_count); + WARN_ON(1); + entropy_count = 0; } - spin_unlock_irqrestore(&r->lock, flags); + nfrac = ibytes << (ENTROPY_SHIFT + 3); + if ((size_t) entropy_count > nfrac) + entropy_count -= nfrac; + else + entropy_count = 0; + + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) + goto retry; trace_debit_entropy(r->name, 8 * ibytes); - if (wakeup_write) { + if (ibytes && + (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_thresh) { wake_up_interruptible(&random_write_wait); kill_fasync(&fasync, SIGIO, POLL_OUT); } @@ -1033,23 +1032,23 @@ static void extract_buf(struct entropy_store *r, __u8 *out) __u8 extract[64]; unsigned long flags; - /* Generate a hash across the pool, 16 words (512 bits) at a time */ - sha_init(hash.w); - spin_lock_irqsave(&r->lock, flags); - for (i = 0; i < r->poolinfo->poolwords; i += 16) - sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); - /* - * If we have a architectural hardware random number - * 
generator, mix that in, too. + * If we have an architectural hardware random number + * generator, use it for SHA's initial vector */ + sha_init(hash.w); for (i = 0; i < LONGS(20); i++) { unsigned long v; if (!arch_get_random_long(&v)) break; - hash.l[i] ^= v; + hash.l[i] = v; } + /* Generate a hash across the pool, 16 words (512 bits) at a time */ + spin_lock_irqsave(&r->lock, flags); + for (i = 0; i < r->poolinfo->poolwords; i += 16) + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); + /* * We mix the hash back into the pool to prevent backtracking * attacks (where the attacker knows the state of the pool @@ -1174,8 +1173,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, /* * This function is the exported kernel interface. It returns some * number of good random numbers, suitable for key generation, seeding - * TCP sequence numbers, etc. It does not use the hw random number - * generator, if available; use get_random_bytes_arch() for that. + * TCP sequence numbers, etc. It does not rely on the hardware random + * number generator. For random bytes direct from the hardware RNG + * (when available), use get_random_bytes_arch(). 
*/ void get_random_bytes(void *buf, int nbytes) { @@ -1288,53 +1288,32 @@ void rand_initialize_disk(struct gendisk *disk) static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { - ssize_t n, retval = 0, count = 0; + ssize_t n; if (nbytes == 0) return 0; - while (nbytes > 0) { - n = nbytes; - if (n > SEC_XFER_SIZE) - n = SEC_XFER_SIZE; - - n = extract_entropy_user(&blocking_pool, buf, n); - - if (n < 0) { - retval = n; - break; - } - + nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE); + while (1) { + n = extract_entropy_user(&blocking_pool, buf, nbytes); + if (n < 0) + return n; trace_random_read(n*8, (nbytes-n)*8, ENTROPY_BITS(&blocking_pool), ENTROPY_BITS(&input_pool)); - - if (n == 0) { - if (file->f_flags & O_NONBLOCK) { - retval = -EAGAIN; - break; - } - - wait_event_interruptible(random_read_wait, - ENTROPY_BITS(&input_pool) >= - random_read_wakeup_thresh); - - if (signal_pending(current)) { - retval = -ERESTARTSYS; - break; - } - - continue; - } - - count += n; - buf += n; - nbytes -= n; - break; /* This break makes the device work */ - /* like a named pipe */ + if (n > 0) + return n; + /* Pool is (near) empty. Maybe wait and retry. */ + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + wait_event_interruptible(random_read_wait, + ENTROPY_BITS(&input_pool) >= + random_read_wakeup_thresh); + if (signal_pending(current)) + return -ERESTARTSYS; } - - return (count ? 
count : retval); } static ssize_t @@ -1347,6 +1326,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) "with %d bits of entropy available\n", current->comm, nonblocking_pool.entropy_total); + nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), @@ -1516,13 +1496,13 @@ static int max_write_thresh = INPUT_POOL_WORDS * 32; static char sysctl_bootid[16]; /* - * These functions is used to return both the bootid UUID, and random + * This function is used to return both the bootid UUID, and random * UUID. The difference is in whether table->data is NULL; if it is, * then a new UUID is generated and returned to the user. * - * If the user accesses this via the proc interface, it will be returned - * as an ASCII string in the standard UUID format. If accesses via the - * sysctl system call, it is returned as 16 bytes of binary data. + * If the user accesses this via the proc interface, the UUID will be + * returned as an ASCII string in the standard UUID format; if via the + * sysctl system call, as 16 bytes of binary data. 
*/ static int proc_do_uuid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -1554,10 +1534,10 @@ static int proc_do_uuid(struct ctl_table *table, int write, /* * Return entropy available scaled to integral bits */ -static int proc_do_entropy(ctl_table *table, int write, +static int proc_do_entropy(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - ctl_table fake_table; + struct ctl_table fake_table; int entropy_count; entropy_count = *(int *)table->data >> ENTROPY_SHIFT; diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index af9437488b6..7e69c5d068f 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -280,7 +280,7 @@ static irqreturn_t rtc_interrupt(int irq, void *dev_id) /* * sysctl-tuning infrastructure. */ -static ctl_table rtc_table[] = { +static struct ctl_table rtc_table[] = { { .procname = "max-user-freq", .data = &rtc_max_user_freq, @@ -291,7 +291,7 @@ static ctl_table rtc_table[] = { { } }; -static ctl_table rtc_root[] = { +static struct ctl_table rtc_root[] = { { .procname = "rtc", .mode = 0555, @@ -300,7 +300,7 @@ static ctl_table rtc_root[] = { { } }; -static ctl_table dev_root[] = { +static struct ctl_table dev_root[] = { { .procname = "dev", .mode = 0555, diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 0d9fa9781ca..1817c95c273 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -142,6 +142,23 @@ config CPU_FREQ_DEFAULT_GOV_LAZY help Use the CPUFreq governor 'lazy' as default. +config CPU_FREQ_DEFAULT_GOV_INTELLIACTIVE + bool "intelliactive" + select CPU_FREQ_GOV_INTELLIACTIVE + help + Use the CPUFreq governor 'intelliactive' as default. This allows + you to get a full dynamic cpu frequency capable system by simply + loading your cpufreq low-level hardware driver, using the + 'interactive' governor for latency-sensitive workloads. 
+ +config CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND + bool "intellidemand" + select CPU_FREQ_GOV_INTELLIDEMAND + help + Use the CPUFreq governor 'ondemand' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + endchoice config CPU_FREQ_GOV_PERFORMANCE @@ -259,6 +276,33 @@ config CPU_FREQ_GOV_LAZY tristate "'lazy' cpufreq governor" depends on CPU_FREQ +config CPU_FREQ_GOV_INTELLIACTIVE + tristate "'intelliactive' cpufreq policy governor" + help + 'intelliactive' - This driver adds a dynamic cpufreq policy governor + designed for latency-sensitive workloads. + + This governor attempts to reduce the latency of clock + increases so that the system is more responsive to + interactive workloads. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_interactive. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_INTELLIDEMAND + tristate "'intellidemand' cpufreq policy governor" + help + 'intellidemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). 
+ menu "x86 CPU frequency scaling drivers" depends on X86 source "drivers/cpufreq/Kconfig.x86" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index a42cb8b7804..e98d5d5c039 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -14,6 +14,8 @@ obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o obj-$(CONFIG_CPU_FREQ_GOV_SMARTMAX) += cpufreq_smartmax.o obj-$(CONFIG_CPU_FREQ_GOV_WHEATLEY) += cpufreq_wheatley.o obj-$(CONFIG_CPU_FREQ_GOV_LAZY) += cpufreq_lazy.o +obj-$(CONFIG_CPU_FREQ_GOV_INTELLIACTIVE)+= cpufreq_intelliactive.o +obj-$(CONFIG_CPU_FREQ_GOV_INTELLIDEMAND)+= cpufreq_intellidemand.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq_intelliactive.c b/drivers/cpufreq/cpufreq_intelliactive.c new file mode 100644 index 00000000000..690b3940975 --- /dev/null +++ b/drivers/cpufreq/cpufreq_intelliactive.c @@ -0,0 +1,1543 @@ +/* + * drivers/cpufreq/cpufreq_intelliactive.c + * + * Copyright (C) 2010 Google, Inc. + * Copyright (C) 2014 Paul Reioux + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Author: Mike Chan (mike@android.com) + * Author: Paul Reioux (reioux@gmail.com) Modified for intelliactive + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int active_count; + +struct cpufreq_interactive_cpuinfo { + struct timer_list cpu_timer; + struct timer_list cpu_slack_timer; + spinlock_t load_lock; /* protects the next 4 fields */ + u64 time_in_idle; + u64 time_in_idle_timestamp; + u64 cputime_speedadj; + u64 cputime_speedadj_timestamp; + struct cpufreq_policy *policy; + struct cpufreq_frequency_table *freq_table; + unsigned int target_freq; + unsigned int floor_freq; + u64 floor_validate_time; + u64 hispeed_validate_time; + struct rw_semaphore enable_sem; + int governor_enabled; + int prev_load; + unsigned int two_phase_freq; +}; + +static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo); + +/* realtime thread handles frequency scaling */ +static struct task_struct *speedchange_task; +static cpumask_t speedchange_cpumask; +static spinlock_t speedchange_cpumask_lock; +static struct mutex gov_lock; + +/* Hi speed to bump to from lo speed when load burst (default max) */ +static unsigned int hispeed_freq = 1026000; + +/* Go to hi speed when CPU load at or above this value. */ +#define DEFAULT_GO_HISPEED_LOAD 99 +static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; + +/* Sampling down factor to be applied to min_sample_time at max freq */ +static unsigned int sampling_down_factor = 100000; + +/* Target load. Lower values result in higher CPU speeds. */ +#define DEFAULT_TARGET_LOAD 90 +static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD}; +static spinlock_t target_loads_lock; +static unsigned int *target_loads = default_target_loads; +static int ntarget_loads = ARRAY_SIZE(default_target_loads); + +/* + * The minimum amount of time to spend at a frequency before we can ramp down. 
+ */ +#define DEFAULT_MIN_SAMPLE_TIME 40000 +static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME; + +/* + * The sample rate of the timer used to increase frequency + */ +#define DEFAULT_TIMER_RATE 30000 +static unsigned long timer_rate = DEFAULT_TIMER_RATE; + +/* Busy SDF parameters*/ +#define MIN_BUSY_TIME (100 * USEC_PER_MSEC) + +/* + * Wait this long before raising speed above hispeed, by default a single + * timer interval. + */ +#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE +static unsigned int default_above_hispeed_delay[] = { + DEFAULT_ABOVE_HISPEED_DELAY }; +static spinlock_t above_hispeed_delay_lock; +static unsigned int *above_hispeed_delay = default_above_hispeed_delay; +static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay); + +/* Non-zero means indefinite speed boost active */ +static int boost_val; +/* Duration of a boot pulse in usecs */ +static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME; +/* End time of boost pulse in ktime converted to usecs */ +static u64 boostpulse_endtime; + +/* + * Max additional time to wait in idle, beyond timer_rate, at speeds above + * minimum before wakeup to reduce speed, or -1 if unnecessary. + */ +#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE) +static int timer_slack_val = DEFAULT_TIMER_SLACK; + +static bool io_is_busy = 1; + +/* + * If the max load among other CPUs is higher than up_threshold_any_cpu_load + * or if the highest frequency among the other CPUs is higher than + * up_threshold_any_cpu_freq then do not let the frequency to drop below + * sync_freq + */ +static unsigned int up_threshold_any_cpu_load = 95; +static unsigned int sync_freq = 702000; +static unsigned int up_threshold_any_cpu_freq = 1026000; + +static int two_phase_freq_array[NR_CPUS] = {[0 ... 
NR_CPUS-1] = 1350000} ; + +static int cpufreq_governor_intelliactive(struct cpufreq_policy *policy, + unsigned int event); + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIACTIVE +static +#endif +struct cpufreq_governor cpufreq_gov_intelliactive = { + .name = "intelliactive", + .governor = cpufreq_governor_intelliactive, + .max_transition_latency = 10000000, + .owner = THIS_MODULE, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? 
wall : NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else if (!io_busy) + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + + +static void cpufreq_interactive_timer_resched( + struct cpufreq_interactive_cpuinfo *pcpu) +{ + unsigned long expires; + unsigned long flags; + + spin_lock_irqsave(&pcpu->load_lock, flags); + pcpu->time_in_idle = + get_cpu_idle_time(smp_processor_id(), + &pcpu->time_in_idle_timestamp, io_is_busy); + pcpu->cputime_speedadj = 0; + pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; + expires = jiffies + usecs_to_jiffies(timer_rate); + mod_timer_pinned(&pcpu->cpu_timer, expires); + + if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) { + expires += usecs_to_jiffies(timer_slack_val); + mod_timer_pinned(&pcpu->cpu_slack_timer, expires); + } + + spin_unlock_irqrestore(&pcpu->load_lock, flags); +} + +/* The caller shall take enable_sem write semaphore to avoid any timer race. + * The cpu_timer and cpu_slack_timer must be deactivated when calling this + * function. 
+ */ +static void cpufreq_interactive_timer_start(int cpu) +{ + struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); + unsigned long expires = jiffies + usecs_to_jiffies(timer_rate); + unsigned long flags; + + pcpu->cpu_timer.expires = expires; + if (cpu_online(cpu)) { + add_timer_on(&pcpu->cpu_timer, cpu); + if (timer_slack_val >= 0 && pcpu->target_freq > + pcpu->policy->min) { + expires += usecs_to_jiffies(timer_slack_val); + pcpu->cpu_slack_timer.expires = expires; + add_timer_on(&pcpu->cpu_slack_timer, cpu); + } + } + + spin_lock_irqsave(&pcpu->load_lock, flags); + pcpu->time_in_idle = + get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp, io_is_busy); + pcpu->cputime_speedadj = 0; + pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp; + spin_unlock_irqrestore(&pcpu->load_lock, flags); +} + +static unsigned int freq_to_above_hispeed_delay(unsigned int freq) +{ + int i; + unsigned int ret; + unsigned long flags; + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + + for (i = 0; i < nabove_hispeed_delay - 1 && + freq >= above_hispeed_delay[i+1]; i += 2) + ; + + ret = above_hispeed_delay[i]; + ret = (ret > (1 * USEC_PER_MSEC)) ? (ret - (1 * USEC_PER_MSEC)) : ret; + + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return ret; +} + +static unsigned int freq_to_targetload(unsigned int freq) +{ + int i; + unsigned int ret; + unsigned long flags; + + spin_lock_irqsave(&target_loads_lock, flags); + + for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2) + ; + + ret = target_loads[i]; + spin_unlock_irqrestore(&target_loads_lock, flags); + return ret; +} + +/* + * If increasing frequencies never map to a lower target load then + * choose_freq() will find the minimum frequency that does not exceed its + * target load given the current load. 
+ */ + +static unsigned int choose_freq( + struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq) +{ + unsigned int freq = pcpu->policy->cur; + unsigned int prevfreq, freqmin, freqmax; + unsigned int tl; + int index; + + freqmin = 0; + freqmax = UINT_MAX; + + do { + prevfreq = freq; + tl = freq_to_targetload(freq); + + /* + * Find the lowest frequency where the computed load is less + * than or equal to the target load. + */ + + if (cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, loadadjfreq / tl, + CPUFREQ_RELATION_L, &index)) + break; + freq = pcpu->freq_table[index].frequency; + + if (freq > prevfreq) { + /* The previous frequency is too low. */ + freqmin = prevfreq; + + if (freq >= freqmax) { + /* + * Find the highest frequency that is less + * than freqmax. + */ + if (cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + freqmax - 1, CPUFREQ_RELATION_H, + &index)) + break; + freq = pcpu->freq_table[index].frequency; + + if (freq == freqmin) { + /* + * The first frequency below freqmax + * has already been found to be too + * low. freqmax is the lowest speed + * we found that is fast enough. + */ + freq = freqmax; + break; + } + } + } else if (freq < prevfreq) { + /* The previous frequency is high enough. */ + freqmax = prevfreq; + + if (freq <= freqmin) { + /* + * Find the lowest frequency that is higher + * than freqmin. + */ + if (cpufreq_frequency_table_target( + pcpu->policy, pcpu->freq_table, + freqmin + 1, CPUFREQ_RELATION_L, + &index)) + break; + freq = pcpu->freq_table[index].frequency; + + /* + * If freqmax is the first frequency above + * freqmin then we have already found that + * this speed is fast enough. + */ + if (freq == freqmax) + break; + } + } + + /* If same frequency chosen as previous then done. 
*/ + } while (freq != prevfreq); + + return freq; +} + +static u64 update_load(int cpu) +{ + struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); + u64 now; + u64 now_idle; + unsigned int delta_idle; + unsigned int delta_time; + u64 active_time; + + now_idle = get_cpu_idle_time(cpu, &now, io_is_busy); + delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle); + delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp); + + if (delta_time <= delta_idle) + active_time = 0; + else + active_time = delta_time - delta_idle; + + pcpu->cputime_speedadj += active_time * pcpu->policy->cur; + + pcpu->time_in_idle = now_idle; + pcpu->time_in_idle_timestamp = now; + return now; +} + +static void cpufreq_interactive_timer(unsigned long data) +{ + u64 now; + unsigned int delta_time; + u64 cputime_speedadj; + int cpu_load; + struct cpufreq_interactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, data); + unsigned int new_freq; + unsigned int loadadjfreq; + unsigned int index; + unsigned long flags; + bool boosted; + unsigned long mod_min_sample_time; + int i, max_load; + unsigned int max_freq; + struct cpufreq_interactive_cpuinfo *picpu; + static unsigned int phase = 0; + static unsigned int counter = 0; + unsigned int nr_cpus; + + if (!down_read_trylock(&pcpu->enable_sem)) + return; + if (!pcpu->governor_enabled) + goto exit; + + if (cpu_is_offline(data)) + goto exit; + + spin_lock_irqsave(&pcpu->load_lock, flags); + now = update_load(data); + delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp); + cputime_speedadj = pcpu->cputime_speedadj; + spin_unlock_irqrestore(&pcpu->load_lock, flags); + + if (WARN_ON_ONCE(!delta_time)) + goto rearm; + + do_div(cputime_speedadj, delta_time); + loadadjfreq = (unsigned int)cputime_speedadj * 100; + cpu_load = loadadjfreq / pcpu->target_freq; + pcpu->prev_load = cpu_load; + boosted = boost_val || now < boostpulse_endtime; + + if (counter < 5) { + counter++; + if (counter > 2) { + phase = 1; + } + } + + if (cpu_load 
>= go_hispeed_load || boosted) { + if (pcpu->target_freq < hispeed_freq) { + nr_cpus = num_online_cpus(); + + pcpu->two_phase_freq = two_phase_freq_array[nr_cpus-1]; + if (pcpu->two_phase_freq < pcpu->policy->cur) + phase = 1; + if (pcpu->two_phase_freq != 0 && phase == 0) { + new_freq = pcpu->two_phase_freq; + } else + new_freq = hispeed_freq; + } else { + new_freq = choose_freq(pcpu, loadadjfreq); + + if (new_freq < hispeed_freq) + new_freq = hispeed_freq; + } + } else { + new_freq = choose_freq(pcpu, loadadjfreq); + + if (sync_freq && new_freq < sync_freq) { + + max_load = 0; + max_freq = 0; + + for_each_online_cpu(i) { + picpu = &per_cpu(cpuinfo, i); + + if (i == data || picpu->prev_load < + up_threshold_any_cpu_load) + continue; + + max_load = max(max_load, picpu->prev_load); + max_freq = max(max_freq, picpu->target_freq); + } + + if (max_freq > up_threshold_any_cpu_freq || + max_load >= up_threshold_any_cpu_load) + new_freq = sync_freq; + } + } + + if (counter > 0) { + counter--; + if (counter == 0) { + phase = 0; + } + } + + if (pcpu->target_freq >= hispeed_freq && + new_freq > pcpu->target_freq && + now - pcpu->hispeed_validate_time < + freq_to_above_hispeed_delay(pcpu->target_freq)) { + goto rearm; + } + + pcpu->hispeed_validate_time = now; + + if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, + new_freq, CPUFREQ_RELATION_L, + &index)) + goto rearm; + + new_freq = pcpu->freq_table[index].frequency; + + /* + * Do not scale below floor_freq unless we have been at or above the + * floor frequency for the minimum sample time since last validated. 
+ */ + if (sampling_down_factor && pcpu->policy->cur == pcpu->policy->max) + mod_min_sample_time = sampling_down_factor; + else + mod_min_sample_time = min_sample_time; + + if (new_freq < pcpu->floor_freq) { + if (now - pcpu->floor_validate_time < mod_min_sample_time) { + goto rearm; + } + } + + /* + * Update the timestamp for checking whether speed has been held at + * or above the selected frequency for a minimum of min_sample_time, + * if not boosted to hispeed_freq. If boosted to hispeed_freq then we + * allow the speed to drop as soon as the boostpulse duration expires + * (or the indefinite boost is turned off). + */ + + if (!boosted || new_freq > hispeed_freq) { + pcpu->floor_freq = new_freq; + pcpu->floor_validate_time = now; + } + + if (pcpu->target_freq == new_freq) { + goto rearm_if_notmax; + } + + pcpu->target_freq = new_freq; + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + cpumask_set_cpu(data, &speedchange_cpumask); + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + wake_up_process(speedchange_task); + +rearm_if_notmax: + /* + * Already set max speed and don't see a need to change that, + * wait until next idle to re-evaluate, don't need timer. 
+ */ + if (pcpu->target_freq == pcpu->policy->max) + goto exit; + +rearm: + if (!timer_pending(&pcpu->cpu_timer)) + cpufreq_interactive_timer_resched(pcpu); + +exit: + up_read(&pcpu->enable_sem); + return; +} + +static void cpufreq_interactive_idle_start(void) +{ + int cpu = smp_processor_id(); + struct cpufreq_interactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, smp_processor_id()); + int pending; + u64 now; + + if (!down_read_trylock(&pcpu->enable_sem)) + return; + if (!pcpu->governor_enabled) + goto exit; + + /* Cancel the timer if cpu is offline */ + if (cpu_is_offline(cpu)) { + del_timer(&pcpu->cpu_timer); + del_timer(&pcpu->cpu_slack_timer); + goto exit; + } + + pending = timer_pending(&pcpu->cpu_timer); + + if (pcpu->target_freq != pcpu->policy->min) { + /* + * Entering idle while not at lowest speed. On some + * platforms this can hold the other CPU(s) at that speed + * even though the CPU is idle. Set a timer to re-evaluate + * speed so this idle CPU doesn't hold the other CPUs above + * min indefinitely. This should probably be a quirk of + * the CPUFreq driver. + */ + if (!pending) { + cpufreq_interactive_timer_resched(pcpu); + + now = ktime_to_us(ktime_get()); + if ((pcpu->policy->cur == pcpu->policy->max) && + (now - pcpu->hispeed_validate_time) > + MIN_BUSY_TIME) { + pcpu->floor_validate_time = now; + } + + } + } +exit: + up_read(&pcpu->enable_sem); +} + +static void cpufreq_interactive_idle_end(void) +{ + struct cpufreq_interactive_cpuinfo *pcpu = + &per_cpu(cpuinfo, smp_processor_id()); + + if (!down_read_trylock(&pcpu->enable_sem)) + return; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + return; + } + + /* Arm the timer for 1-2 ticks later if not already. 
*/ + if (!timer_pending(&pcpu->cpu_timer)) { + cpufreq_interactive_timer_resched(pcpu); + } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) { + del_timer(&pcpu->cpu_timer); + del_timer(&pcpu->cpu_slack_timer); + cpufreq_interactive_timer(smp_processor_id()); + } + + up_read(&pcpu->enable_sem); +} + +static int cpufreq_interactive_speedchange_task(void *data) +{ + unsigned int cpu; + cpumask_t tmp_mask; + unsigned long flags; + struct cpufreq_interactive_cpuinfo *pcpu; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + + if (cpumask_empty(&speedchange_cpumask)) { + spin_unlock_irqrestore(&speedchange_cpumask_lock, + flags); + schedule(); + + if (kthread_should_stop()) + break; + + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + } + + set_current_state(TASK_RUNNING); + tmp_mask = speedchange_cpumask; + cpumask_clear(&speedchange_cpumask); + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + + for_each_cpu(cpu, &tmp_mask) { + unsigned int j; + unsigned int max_freq = 0; + + pcpu = &per_cpu(cpuinfo, cpu); + if (!down_read_trylock(&pcpu->enable_sem)) + continue; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + continue; + } + + for_each_cpu(j, pcpu->policy->cpus) { + struct cpufreq_interactive_cpuinfo *pjcpu = + &per_cpu(cpuinfo, j); + + if (pjcpu->target_freq > max_freq) + max_freq = pjcpu->target_freq; + } + + if (max_freq != pcpu->policy->cur) + __cpufreq_driver_target(pcpu->policy, + max_freq, + CPUFREQ_RELATION_H); + + up_read(&pcpu->enable_sem); + } + } + + return 0; +} + +static void cpufreq_interactive_boost(void) +{ + int i; + int anyboost = 0; + unsigned long flags; + struct cpufreq_interactive_cpuinfo *pcpu; + + spin_lock_irqsave(&speedchange_cpumask_lock, flags); + + for_each_online_cpu(i) { + pcpu = &per_cpu(cpuinfo, i); + + if (pcpu->target_freq < hispeed_freq) { + pcpu->target_freq = hispeed_freq; + cpumask_set_cpu(i, &speedchange_cpumask); + 
pcpu->hispeed_validate_time = + ktime_to_us(ktime_get()); + anyboost = 1; + } + + /* + * Set floor freq and (re)start timer for when last + * validated. + */ + + pcpu->floor_freq = hispeed_freq; + pcpu->floor_validate_time = ktime_to_us(ktime_get()); + } + + spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); + + if (anyboost) + wake_up_process(speedchange_task); +} + +static int cpufreq_interactive_notifier( + struct notifier_block *nb, unsigned long val, void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpufreq_interactive_cpuinfo *pcpu; + int cpu; + unsigned long flags; + + if (val == CPUFREQ_POSTCHANGE) { + pcpu = &per_cpu(cpuinfo, freq->cpu); + if (!down_read_trylock(&pcpu->enable_sem)) + return 0; + if (!pcpu->governor_enabled) { + up_read(&pcpu->enable_sem); + return 0; + } + + for_each_cpu(cpu, pcpu->policy->cpus) { + struct cpufreq_interactive_cpuinfo *pjcpu = + &per_cpu(cpuinfo, cpu); + if (cpu != freq->cpu) { + if (!down_read_trylock(&pjcpu->enable_sem)) + continue; + if (!pjcpu->governor_enabled) { + up_read(&pjcpu->enable_sem); + continue; + } + } + spin_lock_irqsave(&pjcpu->load_lock, flags); + update_load(cpu); + spin_unlock_irqrestore(&pjcpu->load_lock, flags); + if (cpu != freq->cpu) + up_read(&pjcpu->enable_sem); + } + + up_read(&pcpu->enable_sem); + } + return 0; +} + +static struct notifier_block cpufreq_notifier_block = { + .notifier_call = cpufreq_interactive_notifier, +}; + +static unsigned int *get_tokenized_data(const char *buf, int *num_tokens) +{ + const char *cp; + int i; + int ntokens = 1; + unsigned int *tokenized_data; + int err = -EINVAL; + + cp = buf; + while ((cp = strpbrk(cp + 1, " :"))) + ntokens++; + + if (!(ntokens & 0x1)) + goto err; + + tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL); + if (!tokenized_data) { + err = -ENOMEM; + goto err; + } + + cp = buf; + i = 0; + while (i < ntokens) { + if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) + goto err_kfree; + + cp = strpbrk(cp, " :"); + if 
(!cp) + break; + cp++; + } + + if (i != ntokens) + goto err_kfree; + + *num_tokens = ntokens; + return tokenized_data; + +err_kfree: + kfree(tokenized_data); +err: + return ERR_PTR(err); +} + +static ssize_t show_two_phase_freq +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i = 0 ; + int shift = 0 ; + char *buf_pos = buf; + for ( i = 0 ; i < NR_CPUS; i++) { + shift = sprintf(buf_pos,"%d,",two_phase_freq_array[i]); + buf_pos += shift; + } + *(buf_pos-1) = '\0'; + return strlen(buf); +} + +static ssize_t store_two_phase_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + + int ret = 0; + if (NR_CPUS == 1) + ret = sscanf(buf,"%u",&two_phase_freq_array[0]); + else if (NR_CPUS == 2) + ret = sscanf(buf,"%u,%u",&two_phase_freq_array[0], + &two_phase_freq_array[1]); + else if (NR_CPUS == 4) + ret = sscanf(buf, "%u,%u,%u,%u", &two_phase_freq_array[0], + &two_phase_freq_array[1], + &two_phase_freq_array[2], + &two_phase_freq_array[3]); + if (ret < NR_CPUS) + return -EINVAL; + + return count; +} + +static struct global_attr two_phase_freq_attr = + __ATTR(two_phase_freq, S_IRUGO | S_IWUSR, + show_two_phase_freq, store_two_phase_freq); + +static ssize_t show_target_loads( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i; + ssize_t ret = 0; + unsigned long flags; + + spin_lock_irqsave(&target_loads_lock, flags); + + for (i = 0; i < ntarget_loads; i++) + ret += sprintf(buf + ret, "%u%s", target_loads[i], + i & 0x1 ? 
":" : " "); + + sprintf(buf + ret - 1, "\n"); + spin_unlock_irqrestore(&target_loads_lock, flags); + return ret; +} + +static ssize_t store_target_loads( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ntokens; + unsigned int *new_target_loads = NULL; + unsigned long flags; + + new_target_loads = get_tokenized_data(buf, &ntokens); + if (IS_ERR(new_target_loads)) + return PTR_RET(new_target_loads); + + spin_lock_irqsave(&target_loads_lock, flags); + if (target_loads != default_target_loads) + kfree(target_loads); + target_loads = new_target_loads; + ntarget_loads = ntokens; + spin_unlock_irqrestore(&target_loads_lock, flags); + return count; +} + +static struct global_attr target_loads_attr = + __ATTR(target_loads, S_IRUGO | S_IWUSR, + show_target_loads, store_target_loads); + +static ssize_t show_above_hispeed_delay( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i; + ssize_t ret = 0; + unsigned long flags; + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + + for (i = 0; i < nabove_hispeed_delay; i++) + ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i], + i & 0x1 ? 
":" : " "); + + sprintf(buf + ret - 1, "\n"); + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return ret; +} + +static ssize_t store_above_hispeed_delay( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ntokens; + unsigned int *new_above_hispeed_delay = NULL; + unsigned long flags; + + new_above_hispeed_delay = get_tokenized_data(buf, &ntokens); + if (IS_ERR(new_above_hispeed_delay)) + return PTR_RET(new_above_hispeed_delay); + + spin_lock_irqsave(&above_hispeed_delay_lock, flags); + if (above_hispeed_delay != default_above_hispeed_delay) + kfree(above_hispeed_delay); + above_hispeed_delay = new_above_hispeed_delay; + nabove_hispeed_delay = ntokens; + spin_unlock_irqrestore(&above_hispeed_delay_lock, flags); + return count; + +} + +static struct global_attr above_hispeed_delay_attr = + __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR, + show_above_hispeed_delay, store_above_hispeed_delay); + +static ssize_t show_hispeed_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", hispeed_freq); +} + +static ssize_t store_hispeed_freq(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + long unsigned int val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + hispeed_freq = val; + return count; +} + +static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644, + show_hispeed_freq, store_hispeed_freq); + +static ssize_t show_sampling_down_factor(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sampling_down_factor); +} + +static ssize_t store_sampling_down_factor(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + long unsigned int val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + sampling_down_factor = val; + return count; +} + +static struct global_attr sampling_down_factor_attr = + 
__ATTR(sampling_down_factor, 0644, + show_sampling_down_factor, store_sampling_down_factor); + +static ssize_t show_go_hispeed_load(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", go_hispeed_load); +} + +static ssize_t store_go_hispeed_load(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + go_hispeed_load = val; + return count; +} + +static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644, + show_go_hispeed_load, store_go_hispeed_load); + +static ssize_t show_min_sample_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", min_sample_time); +} + +static ssize_t store_min_sample_time(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + min_sample_time = val; + return count; +} + +static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644, + show_min_sample_time, store_min_sample_time); + +static ssize_t show_timer_rate(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%lu\n", timer_rate); +} + +static ssize_t store_timer_rate(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = strict_strtoul(buf, 0, &val); + if (ret < 0) + return ret; + timer_rate = val; + return count; +} + +static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644, + show_timer_rate, store_timer_rate); + +static ssize_t show_timer_slack( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", timer_slack_val); +} + +static ssize_t store_timer_slack( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + unsigned long 
val; + + ret = kstrtol(buf, 10, &val); + if (ret < 0) + return ret; + + timer_slack_val = val; + return count; +} + +define_one_global_rw(timer_slack); + +static ssize_t show_boost(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", boost_val); +} + +static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boost_val = val; + + if (boost_val) { + cpufreq_interactive_boost(); + } + + return count; +} + +define_one_global_rw(boost); + +static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val; + cpufreq_interactive_boost(); + return count; +} + +static struct global_attr boostpulse = + __ATTR(boostpulse, 0200, NULL, store_boostpulse); + +static ssize_t show_boostpulse_duration( + struct kobject *kobj, struct attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", boostpulse_duration_val); +} + +static ssize_t store_boostpulse_duration( + struct kobject *kobj, struct attribute *attr, const char *buf, + size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + + boostpulse_duration_val = val; + return count; +} + +define_one_global_rw(boostpulse_duration); + +static ssize_t show_io_is_busy(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", io_is_busy); +} + +static ssize_t store_io_is_busy(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + io_is_busy = val; + return count; +} + +static struct global_attr io_is_busy_attr = 
__ATTR(io_is_busy, 0644, + show_io_is_busy, store_io_is_busy); + +static ssize_t show_sync_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sync_freq); +} + +static ssize_t store_sync_freq(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + sync_freq = val; + return count; +} + +static struct global_attr sync_freq_attr = __ATTR(sync_freq, 0644, + show_sync_freq, store_sync_freq); + +static ssize_t show_up_threshold_any_cpu_load(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_load); +} + +static ssize_t store_up_threshold_any_cpu_load(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + up_threshold_any_cpu_load = val; + return count; +} + +static struct global_attr up_threshold_any_cpu_load_attr = + __ATTR(up_threshold_any_cpu_load, 0644, + show_up_threshold_any_cpu_load, + store_up_threshold_any_cpu_load); + +static ssize_t show_up_threshold_any_cpu_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_freq); +} + +static ssize_t store_up_threshold_any_cpu_freq(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + int ret; + unsigned long val; + + ret = kstrtoul(buf, 0, &val); + if (ret < 0) + return ret; + up_threshold_any_cpu_freq = val; + return count; +} + +static struct global_attr up_threshold_any_cpu_freq_attr = + __ATTR(up_threshold_any_cpu_freq, 0644, + show_up_threshold_any_cpu_freq, + store_up_threshold_any_cpu_freq); + +static struct attribute *interactive_attributes[] = { + &target_loads_attr.attr, + &above_hispeed_delay_attr.attr, + &hispeed_freq_attr.attr, + 
&go_hispeed_load_attr.attr, + &min_sample_time_attr.attr, + &timer_rate_attr.attr, + &timer_slack.attr, + &boost.attr, + &boostpulse.attr, + &boostpulse_duration.attr, + &io_is_busy_attr.attr, + &sampling_down_factor_attr.attr, + &sync_freq_attr.attr, + &up_threshold_any_cpu_load_attr.attr, + &up_threshold_any_cpu_freq_attr.attr, + &two_phase_freq_attr.attr, + NULL, +}; + +static void interactive_input_event(struct input_handle *handle, + unsigned int type, + unsigned int code, int value) +{ + if (type == EV_SYN && code == SYN_REPORT) { + boostpulse_endtime = ktime_to_us(ktime_get()) + + boostpulse_duration_val; + cpufreq_interactive_boost(); + } +} + +static int interactive_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) +{ + struct input_handle *handle; + int error; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "cpufreq"; + + error = input_register_handle(handle); + if (error) + goto err2; + + error = input_open_device(handle); + if (error) + goto err1; + + return 0; +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return error; +} + +static void interactive_input_disconnect(struct input_handle *handle) +{ + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id interactive_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_ABSBIT, + .evbit = { BIT_MASK(EV_ABS) }, + .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = + BIT_MASK(ABS_MT_POSITION_X) | + BIT_MASK(ABS_MT_POSITION_Y) }, + }, /* multi-touch touchscreen */ + { + .flags = INPUT_DEVICE_ID_MATCH_KEYBIT | + INPUT_DEVICE_ID_MATCH_ABSBIT, + .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, + .absbit = { [BIT_WORD(ABS_X)] = + BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, + }, /* touchpad */ +}; + +static struct input_handler 
interactive_input_handler = { + .event = interactive_input_event, + .connect = interactive_input_connect, + .disconnect = interactive_input_disconnect, + .name = "intelliactive", + .id_table = interactive_ids, +}; + +static struct attribute_group interactive_attr_group = { + .attrs = interactive_attributes, + .name = "intelliactive", +}; + +static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, + unsigned long val, + void *data) +{ + switch (val) { + case IDLE_START: + cpufreq_interactive_idle_start(); + break; + case IDLE_END: + cpufreq_interactive_idle_end(); + break; + } + + return 0; +} + +static struct notifier_block cpufreq_interactive_idle_nb = { + .notifier_call = cpufreq_interactive_idle_notifier, +}; + +static int cpufreq_governor_intelliactive(struct cpufreq_policy *policy, + unsigned int event) +{ + int rc; + unsigned int j; + struct cpufreq_interactive_cpuinfo *pcpu; + struct cpufreq_frequency_table *freq_table; + + switch (event) { + case CPUFREQ_GOV_START: + mutex_lock(&gov_lock); + + freq_table = + cpufreq_frequency_get_table(policy->cpu); + if (!hispeed_freq) + hispeed_freq = policy->max; + + for_each_cpu(j, policy->cpus) { + pcpu = &per_cpu(cpuinfo, j); + pcpu->policy = policy; + pcpu->target_freq = policy->cur; + pcpu->freq_table = freq_table; + pcpu->floor_freq = pcpu->target_freq; + pcpu->floor_validate_time = + ktime_to_us(ktime_get()); + pcpu->hispeed_validate_time = + pcpu->floor_validate_time; + down_write(&pcpu->enable_sem); + del_timer_sync(&pcpu->cpu_timer); + del_timer_sync(&pcpu->cpu_slack_timer); + cpufreq_interactive_timer_start(j); + pcpu->governor_enabled = 1; + up_write(&pcpu->enable_sem); + } + + /* + * Do not register the idle hook and create sysfs + * entries if we have already done so. 
+ */ + if (++active_count > 1) { + mutex_unlock(&gov_lock); + return 0; + } + + if (!policy->cpu) + rc = input_register_handler + (&interactive_input_handler); + + rc = sysfs_create_group(cpufreq_global_kobject, + &interactive_attr_group); + if (rc) { + mutex_unlock(&gov_lock); + return rc; + } + + idle_notifier_register(&cpufreq_interactive_idle_nb); + cpufreq_register_notifier( + &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + mutex_unlock(&gov_lock); + break; + + case CPUFREQ_GOV_STOP: + mutex_lock(&gov_lock); + for_each_cpu(j, policy->cpus) { + pcpu = &per_cpu(cpuinfo, j); + down_write(&pcpu->enable_sem); + pcpu->governor_enabled = 0; + pcpu->target_freq = 0; + del_timer_sync(&pcpu->cpu_timer); + del_timer_sync(&pcpu->cpu_slack_timer); + up_write(&pcpu->enable_sem); + } + + if (--active_count > 0) { + if (!policy->cpu) + input_unregister_handler(&interactive_input_handler); + mutex_unlock(&gov_lock); + return 0; + } + + cpufreq_unregister_notifier( + &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); + idle_notifier_unregister(&cpufreq_interactive_idle_nb); + sysfs_remove_group(cpufreq_global_kobject, + &interactive_attr_group); + mutex_unlock(&gov_lock); + + break; + + case CPUFREQ_GOV_LIMITS: + if (policy->max < policy->cur) + __cpufreq_driver_target(policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > policy->cur) + __cpufreq_driver_target(policy, + policy->min, CPUFREQ_RELATION_L); + for_each_cpu(j, policy->cpus) { + pcpu = &per_cpu(cpuinfo, j); + + /* hold write semaphore to avoid race */ + down_write(&pcpu->enable_sem); + if (pcpu->governor_enabled == 0) { + up_write(&pcpu->enable_sem); + continue; + } + + /* update target_freq firstly */ + if (policy->max < pcpu->target_freq) + pcpu->target_freq = policy->max; + else if (policy->min > pcpu->target_freq) + pcpu->target_freq = policy->min; + + /* Reschedule timer. 
+ * Delete the timers, else the timer callback may + * return without re-arm the timer when failed + * acquire the semaphore. This race may cause timer + * stopped unexpectedly. + */ + del_timer_sync(&pcpu->cpu_timer); + del_timer_sync(&pcpu->cpu_slack_timer); + cpufreq_interactive_timer_start(j); + up_write(&pcpu->enable_sem); + } + break; + } + return 0; +} + +static void cpufreq_interactive_nop_timer(unsigned long data) +{ +} + +static int __init cpufreq_intelliactive_init(void) +{ + unsigned int i; + struct cpufreq_interactive_cpuinfo *pcpu; + struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + + /* Initalize per-cpu timers */ + for_each_possible_cpu(i) { + pcpu = &per_cpu(cpuinfo, i); + init_timer_deferrable(&pcpu->cpu_timer); + pcpu->cpu_timer.function = cpufreq_interactive_timer; + pcpu->cpu_timer.data = i; + init_timer(&pcpu->cpu_slack_timer); + pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer; + spin_lock_init(&pcpu->load_lock); + init_rwsem(&pcpu->enable_sem); + } + + spin_lock_init(&target_loads_lock); + spin_lock_init(&speedchange_cpumask_lock); + spin_lock_init(&above_hispeed_delay_lock); + mutex_init(&gov_lock); + speedchange_task = + kthread_create(cpufreq_interactive_speedchange_task, NULL, + "cfintelliactive"); + if (IS_ERR(speedchange_task)) + return PTR_ERR(speedchange_task); + + sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, ¶m); + get_task_struct(speedchange_task); + + /* NB: wake up so the thread does not look hung to the freezer */ + wake_up_process(speedchange_task); + + return cpufreq_register_governor(&cpufreq_gov_intelliactive); +} + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIACTIVE +fs_initcall(cpufreq_intelliactive_init); +#else +module_init(cpufreq_intelliactive_init); +#endif + +static void __exit cpufreq_interactive_exit(void) +{ + cpufreq_unregister_governor(&cpufreq_gov_intelliactive); + kthread_stop(speedchange_task); + put_task_struct(speedchange_task); +} + 
+module_exit(cpufreq_interactive_exit); + +MODULE_AUTHOR("Mike Chan "); +MODULE_AUTHOR("Paul Reioux "); +MODULE_DESCRIPTION("'cpufreq_intelliactive' - A cpufreq governor for " + "Latency sensitive workloads based on Google's Interactive"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq_intellidemand.c b/drivers/cpufreq/cpufreq_intellidemand.c new file mode 100644 index 00000000000..53a4bacf266 --- /dev/null +++ b/drivers/cpufreq/cpufreq_intellidemand.c @@ -0,0 +1,1832 @@ +/* + * drivers/cpufreq/cpufreq_intellidemand.c + * + * Copyright (C) 2001 Russell King + * (C) 2003 Venkatesh Pallipadi . + * Jun Nakajima + * (C) 2013 The Linux Foundation. All rights reserved. + * (C) 2013 Paul Reioux + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define INTELLIDEMAND_MAJOR_VERSION 5 +#define INTELLIDEMAND_MINOR_VERSION 5 + +/* + * dbs is used in this file as a shortform for demandbased switching + * It helps to keep variable names smaller, simpler + */ + +#define DEF_SAMPLING_RATE (50000) +#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) +#define DEF_FREQUENCY_UP_THRESHOLD (80) +#define DEF_SAMPLING_DOWN_FACTOR (1) +#define MAX_SAMPLING_DOWN_FACTOR (100000) +#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) +#define MICRO_FREQUENCY_UP_THRESHOLD (95) +#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) +#define MIN_FREQUENCY_UP_THRESHOLD (11) +#define MAX_FREQUENCY_UP_THRESHOLD (100) +#define MIN_FREQUENCY_DOWN_DIFFERENTIAL (1) + +#define DEF_FREQ_STEP (25) +#define DEF_STEP_UP_EARLY_HISPEED (1026000) +#define DEF_STEP_UP_INTERIM_HISPEED (1350000) +#define DEF_SAMPLING_EARLY_HISPEED_FACTOR (2) +#define DEF_SAMPLING_INTERIM_HISPEED_FACTOR (3) + +/* PATCH : SMART_UP */ +#define MIN(X, 
Y) ((X) < (Y) ? (X) : (Y)) + +#define SMART_UP_PLUS (0) +#define SMART_UP_SLOW_UP_AT_HIGH_FREQ (1) +#define SUP_MAX_STEP (3) +#define SUP_CORE_NUM (4) +#define SUP_SLOW_UP_DUR (5) +#define SUP_SLOW_UP_DUR_DEFAULT (2) + +#define SUP_HIGH_SLOW_UP_DUR (5) +#define SUP_FREQ_LEVEL (14) + +#if defined(SMART_UP_PLUS) +static unsigned int SUP_THRESHOLD_STEPS[SUP_MAX_STEP] = {85, 90, 95}; +static unsigned int SUP_FREQ_STEPS[SUP_MAX_STEP] = {4, 3, 2}; +typedef struct{ + unsigned int freq_idx; + unsigned int freq_value; +} freq_table_idx; +static freq_table_idx pre_freq_idx[SUP_CORE_NUM] = {}; + +#endif + + +#if defined(SMART_UP_SLOW_UP_AT_HIGH_FREQ) + +#define SUP_SLOW_UP_FREQUENCY (1350000) +#define SUP_HIGH_SLOW_UP_FREQUENCY (1512000) +#define SUP_SLOW_UP_LOAD (90) + +typedef struct { + unsigned int hist_max_load[SUP_SLOW_UP_DUR]; + unsigned int hist_load_cnt; +} history_load; +static void reset_hist(history_load *hist_load); +static history_load hist_load[SUP_CORE_NUM] = {}; + +typedef struct { + unsigned int hist_max_load[SUP_HIGH_SLOW_UP_DUR]; + unsigned int hist_load_cnt; +} history_load_high; +static void reset_hist_high(history_load_high *hist_load); +static history_load_high hist_load_high[SUP_CORE_NUM] = {}; + +#endif + + +/* + * The polling frequency of this governor depends on the capability of + * the processor. Default polling frequency is 1000 times the transition + * latency of the processor. The governor will work on any processor with + * transition latency <= 10mS, using appropriate sampling + * rate. + * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL) + * this governor will not work. + * All times here are in uS. 
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) + +static unsigned int min_sampling_rate; + +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (100) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + +#define POWERSAVE_BIAS_MAXLEVEL (1000) +#define POWERSAVE_BIAS_MINLEVEL (-1000) + +static void do_dbs_timer(struct work_struct *work); + +/* Sampling types */ +enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; + +struct cpu_dbs_info_s { + u64 prev_cpu_idle; + u64 prev_cpu_iowait; + u64 prev_cpu_wall; + u64 prev_cpu_nice; + struct cpufreq_policy *cur_policy; + struct delayed_work work; + struct cpufreq_frequency_table *freq_table; + unsigned int freq_lo; + unsigned int freq_lo_jiffies; + unsigned int freq_hi_jiffies; + unsigned int rate_mult; + unsigned int prev_load; + unsigned int max_load; + int cpu; + unsigned int sample_type:1; + unsigned int freq_stay_count; + /* + * percpu mutex that serializes governor limit change with + * do_dbs_timer invocation. We do not want do_dbs_timer to run + * when user is changing the governor or limits. + */ + struct mutex timer_mutex; +}; +static DEFINE_PER_CPU(struct cpu_dbs_info_s, id_cpu_dbs_info); + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info); +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info); + +static unsigned int dbs_enable; /* number of CPUs using this policy */ + +/* + * dbs_mutex protects dbs_enable and dbs_info during start/stop. 
+ */ +static DEFINE_MUTEX(dbs_mutex); + +static struct workqueue_struct *dbs_wq; + +struct dbs_work_struct { + struct work_struct work; + unsigned int cpu; +}; + +static DEFINE_PER_CPU(struct dbs_work_struct, dbs_refresh_work); + +static struct dbs_tuners { + unsigned int sampling_rate; + unsigned int up_threshold; + unsigned int up_threshold_multi_core; + unsigned int down_differential; + unsigned int down_differential_multi_core; + unsigned int optimal_freq; + unsigned int up_threshold_any_cpu_load; + unsigned int sync_freq; + unsigned int ignore_nice; + unsigned int sampling_down_factor; + int powersave_bias; + unsigned int io_is_busy; + /* 20130711 smart_up */ + unsigned int smart_up; + unsigned int smart_slow_up_load; + unsigned int smart_slow_up_freq; + unsigned int smart_slow_up_dur; + unsigned int smart_high_slow_up_freq; + unsigned int smart_high_slow_up_dur; + unsigned int smart_each_off; + /* end smart_up */ + unsigned int freq_step; + unsigned int step_up_early_hispeed; + unsigned int step_up_interim_hispeed; + unsigned int sampling_early_factor; + unsigned int sampling_interim_factor; + unsigned int two_phase_freq; +} dbs_tuners_ins = { + .up_threshold_multi_core = DEF_FREQUENCY_UP_THRESHOLD, + .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, + .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, + .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, + .down_differential_multi_core = MICRO_FREQUENCY_DOWN_DIFFERENTIAL, + .up_threshold_any_cpu_load = DEF_FREQUENCY_UP_THRESHOLD, + .ignore_nice = 0, + .powersave_bias = 0, + .sync_freq = 1026000, + .optimal_freq = 702000, + /* 20130711 smart_up */ + .smart_up = SMART_UP_PLUS, + .smart_slow_up_load = SUP_SLOW_UP_LOAD, + .smart_slow_up_freq = SUP_SLOW_UP_FREQUENCY, + .smart_slow_up_dur = SUP_SLOW_UP_DUR_DEFAULT, + .smart_high_slow_up_freq = SUP_HIGH_SLOW_UP_FREQUENCY, + .smart_high_slow_up_dur = SUP_HIGH_SLOW_UP_DUR, + .smart_each_off = 0, + /* end smart_up */ + .freq_step = DEF_FREQ_STEP, + .step_up_early_hispeed = 
DEF_STEP_UP_EARLY_HISPEED, + .step_up_interim_hispeed = DEF_STEP_UP_INTERIM_HISPEED, + .sampling_early_factor = DEF_SAMPLING_EARLY_HISPEED_FACTOR, + .sampling_interim_factor = DEF_SAMPLING_INTERIM_HISPEED_FACTOR, + .two_phase_freq = 702000, + .io_is_busy = 0, + .sampling_rate = DEF_SAMPLING_RATE, +}; + +static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, + cputime64_t *wall) +{ + u64 idle_time; + u64 cur_wall_time; + u64 busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + + idle_time = cur_wall_time - busy_time; + if (wall) + *wall = jiffies_to_usecs(cur_wall_time); + + return jiffies_to_usecs(idle_time); +} + +static inline u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) +{ + u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL); + + if (idle_time == -1ULL) + return get_cpu_idle_time_jiffy(cpu, wall); + else if (!io_busy) + idle_time += get_cpu_iowait_time_us(cpu, wall); + + return idle_time; +} + +static inline u64 get_cpu_iowait_time(unsigned int cpu, u64 *wall) +{ + u64 iowait_time = get_cpu_iowait_time_us(cpu, wall); + + if (iowait_time == -1ULL) + return 0; + + return iowait_time; +} + +/* + * Find right freq to be set now with powersave_bias on. + * Returns the freq_hi to be used right now and will set freq_hi_jiffies, + * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs. 
+ */ +static unsigned int powersave_bias_target(struct cpufreq_policy *policy, + unsigned int freq_next, + unsigned int relation) +{ + unsigned int freq_req, freq_avg; + unsigned int freq_hi, freq_lo; + unsigned int index = 0; + unsigned int jiffies_total, jiffies_hi, jiffies_lo; + int freq_reduc; + struct cpu_dbs_info_s *dbs_info = &per_cpu(id_cpu_dbs_info, + policy->cpu); + + if (!dbs_info->freq_table) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_next; + } + + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, + relation, &index); + freq_req = dbs_info->freq_table[index].frequency; + freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000; + freq_avg = freq_req - freq_reduc; + + /* Find freq bounds for freq_avg in freq_table */ + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_H, &index); + freq_lo = dbs_info->freq_table[index].frequency; + index = 0; + cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, + CPUFREQ_RELATION_L, &index); + freq_hi = dbs_info->freq_table[index].frequency; + + /* Find out how long we have to be in hi and lo freqs */ + if (freq_hi == freq_lo) { + dbs_info->freq_lo = 0; + dbs_info->freq_lo_jiffies = 0; + return freq_lo; + } + jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + jiffies_hi = (freq_avg - freq_lo) * jiffies_total; + jiffies_hi += ((freq_hi - freq_lo) / 2); + jiffies_hi /= (freq_hi - freq_lo); + jiffies_lo = jiffies_total - jiffies_hi; + dbs_info->freq_lo = freq_lo; + dbs_info->freq_lo_jiffies = jiffies_lo; + dbs_info->freq_hi_jiffies = jiffies_hi; + return freq_hi; +} + +static int intellidemand_powersave_bias_setspeed(struct cpufreq_policy *policy, + struct cpufreq_policy *altpolicy, + int level) +{ + if (level == POWERSAVE_BIAS_MAXLEVEL) { + /* maximum powersave; set to lowest frequency */ + __cpufreq_driver_target(policy, + (altpolicy) ? 
altpolicy->min : policy->min, + CPUFREQ_RELATION_L); + return 1; + } else if (level == POWERSAVE_BIAS_MINLEVEL) { + /* minimum powersave; set to highest frequency */ + __cpufreq_driver_target(policy, + (altpolicy) ? altpolicy->max : policy->max, + CPUFREQ_RELATION_H); + return 1; + } + return 0; +} + +static void intellidemand_powersave_bias_init_cpu(int cpu) +{ + struct cpu_dbs_info_s *dbs_info = &per_cpu(id_cpu_dbs_info, cpu); + dbs_info->freq_table = cpufreq_frequency_get_table(cpu); + dbs_info->freq_lo = 0; +} + +static void intellidemand_powersave_bias_init(void) +{ + int i; + for_each_online_cpu(i) { + intellidemand_powersave_bias_init_cpu(i); + } +} + +/************************** sysfs interface ************************/ + +static ssize_t show_sampling_rate_min(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", min_sampling_rate); +} + +define_one_global_ro(sampling_rate_min); + +/* cpufreq_intellidemand Governor Tunables */ +#define show_one(file_name, object) \ +static ssize_t show_##file_name \ +(struct kobject *kobj, struct attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ +} +show_one(sampling_rate, sampling_rate); +show_one(io_is_busy, io_is_busy); +show_one(up_threshold, up_threshold); +show_one(up_threshold_multi_core, up_threshold_multi_core); +show_one(down_differential, down_differential); +show_one(sampling_down_factor, sampling_down_factor); +show_one(ignore_nice_load, ignore_nice); +show_one(optimal_freq, optimal_freq); +show_one(up_threshold_any_cpu_load, up_threshold_any_cpu_load); +show_one(sync_freq, sync_freq); +/* 20130711 smart_up */ +show_one(smart_up, smart_up); +show_one(smart_slow_up_load, smart_slow_up_load); +show_one(smart_slow_up_freq, smart_slow_up_freq); +show_one(smart_slow_up_dur, smart_slow_up_dur); +show_one(smart_high_slow_up_freq, smart_high_slow_up_freq); +show_one(smart_high_slow_up_dur, smart_high_slow_up_dur); +show_one(smart_each_off, 
smart_each_off); +/* end smart_up */ +show_one(freq_step, freq_step); +show_one(step_up_early_hispeed, step_up_early_hispeed); +show_one(step_up_interim_hispeed, step_up_interim_hispeed); +show_one(sampling_early_factor, sampling_early_factor); +show_one(sampling_interim_factor, sampling_interim_factor); + +static ssize_t show_powersave_bias +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias); +} + +static int two_phase_freq_array[NR_CPUS] = {[0 ... NR_CPUS-1] = 810000} ; + +static ssize_t show_two_phase_freq +(struct kobject *kobj, struct attribute *attr, char *buf) +{ + int i = 0 ; + int shift = 0 ; + char *buf_pos = buf; + for ( i = 0 ; i < NR_CPUS; i++) { + shift = sprintf(buf_pos,"%d,",two_phase_freq_array[i]); + buf_pos += shift; + } + *(buf_pos-1) = '\0'; + return strlen(buf); +} + +static ssize_t store_two_phase_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + + int ret = 0; + if (NR_CPUS == 1) + ret = sscanf(buf,"%u",&two_phase_freq_array[0]); + else if (NR_CPUS == 2) + ret = sscanf(buf,"%u,%u",&two_phase_freq_array[0], + &two_phase_freq_array[1]); + else if (NR_CPUS == 4) + ret = sscanf(buf, "%u,%u,%u,%u", &two_phase_freq_array[0], + &two_phase_freq_array[1], + &two_phase_freq_array[2], + &two_phase_freq_array[3]); + if (ret < NR_CPUS) + return -EINVAL; + + return count; +} + +static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); + + return count; +} + +static ssize_t store_sync_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.sync_freq = input; + + return count; +} + 
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.io_is_busy = !!input; + + return count; +} + +static ssize_t store_optimal_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + dbs_tuners_ins.optimal_freq = input; + + return count; +} + +static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold = input; + + return count; +} + +static ssize_t store_up_threshold_multi_core(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold_multi_core = input; + + return count; +} + +static ssize_t store_up_threshold_any_cpu_load(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || + input < MIN_FREQUENCY_UP_THRESHOLD) { + return -EINVAL; + } + dbs_tuners_ins.up_threshold_any_cpu_load = input; + + return count; +} + +static ssize_t store_down_differential(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input >= dbs_tuners_ins.up_threshold || + input < MIN_FREQUENCY_DOWN_DIFFERENTIAL) { + return -EINVAL; + } + + 
dbs_tuners_ins.down_differential = input; + + return count; +} + +static ssize_t store_sampling_down_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input, j; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_down_factor = input; + + /* Reset down sampling multiplier in case it was active */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(id_cpu_dbs_info, j); + dbs_info->rate_mult = 1; + } + return count; +} + +static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + + unsigned int j; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + + if (input > 1) + input = 1; + + if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ + return count; + } + dbs_tuners_ins.ignore_nice = input; + + /* we need to re-evaluate prev_cpu_idle */ + for_each_online_cpu(j) { + struct cpu_dbs_info_s *dbs_info; + dbs_info = &per_cpu(id_cpu_dbs_info, j); + dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &dbs_info->prev_cpu_wall, + dbs_tuners_ins.io_is_busy); + if (dbs_tuners_ins.ignore_nice) + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + + } + return count; +} + +static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + int input = 0; + int bypass = 0; + int ret, cpu, reenable_timer, j; + struct cpu_dbs_info_s *dbs_info; + + struct cpumask cpus_timer_done; + cpumask_clear(&cpus_timer_done); + + ret = sscanf(buf, "%d", &input); + + if (ret != 1) + return -EINVAL; + + if (input >= POWERSAVE_BIAS_MAXLEVEL) { + input = POWERSAVE_BIAS_MAXLEVEL; + bypass = 1; + } else if (input <= POWERSAVE_BIAS_MINLEVEL) { + input = POWERSAVE_BIAS_MINLEVEL; + bypass = 1; + } + + if (input == dbs_tuners_ins.powersave_bias) { + 
/* no change */ + return count; + } + + reenable_timer = ((dbs_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MAXLEVEL) || + (dbs_tuners_ins.powersave_bias == + POWERSAVE_BIAS_MINLEVEL)); + + dbs_tuners_ins.powersave_bias = input; + + get_online_cpus(); + mutex_lock(&dbs_mutex); + + if (!bypass) { + if (reenable_timer) { + /* reinstate dbs timer */ + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + dbs_info = &per_cpu(id_cpu_dbs_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!dbs_info->cur_policy) { + pr_err("Dbs policy is NULL\n"); + goto skip_this_cpu; + } + if (cpumask_test_cpu(j, dbs_info-> + cur_policy->cpus)) + goto skip_this_cpu; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + if (dbs_info->cur_policy) { + dbs_timer_exit(dbs_info); + /* restart dbs timer */ + mutex_lock(&dbs_info->timer_mutex); + dbs_timer_init(dbs_info); + mutex_unlock(&dbs_info->timer_mutex); + } +skip_this_cpu: + unlock_policy_rwsem_write(cpu); + } + } + intellidemand_powersave_bias_init(); + } else { + /* running at maximum or minimum frequencies; cancel + dbs timer as periodic load sampling is not necessary */ + for_each_online_cpu(cpu) { + if (lock_policy_rwsem_write(cpu) < 0) + continue; + + dbs_info = &per_cpu(id_cpu_dbs_info, cpu); + + for_each_cpu(j, &cpus_timer_done) { + if (!dbs_info->cur_policy) { + pr_err("Dbs policy is NULL\n"); + goto skip_this_cpu_bypass; + } + if (cpumask_test_cpu(j, dbs_info-> + cur_policy->cpus)) + goto skip_this_cpu_bypass; + } + + cpumask_set_cpu(cpu, &cpus_timer_done); + + if (dbs_info->cur_policy) { + /* cpu using intellidemand, cancel dbs timer */ + dbs_timer_exit(dbs_info); + + mutex_lock(&dbs_info->timer_mutex); + intellidemand_powersave_bias_setspeed( + dbs_info->cur_policy, + NULL, + input); + mutex_unlock(&dbs_info->timer_mutex); + } +skip_this_cpu_bypass: + unlock_policy_rwsem_write(cpu); + } + } + + mutex_unlock(&dbs_mutex); + put_online_cpus(); + + return count; +} + +/* PATCH : SMART_UP */ +#if 
defined(SMART_UP_SLOW_UP_AT_HIGH_FREQ) +static void reset_hist(history_load *hist_load) +{ + int i; + + for (i = 0; i < SUP_SLOW_UP_DUR ; i++) + hist_load->hist_max_load[i] = 0; + + hist_load->hist_load_cnt = 0; +} + + +static void reset_hist_high(history_load_high *hist_load) +{ int i; + + for (i = 0; i < SUP_HIGH_SLOW_UP_DUR ; i++) + hist_load->hist_max_load[i] = 0; + + hist_load->hist_load_cnt = 0; +} + +#endif + +/* 20130711 smart_up */ +static ssize_t store_smart_up(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int i, input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input > 1) { + input = 1; + } else if (input < 0) { + input = 0; + } + + /* buffer reset */ + for_each_online_cpu(i) { + reset_hist(&hist_load[i]); + reset_hist_high(&hist_load_high[i]); + } + dbs_tuners_ins.smart_up = input; + + return count; +} + +static ssize_t store_smart_slow_up_load(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int i, input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input > 100) { + input = 100; + } else if (input < 0) { + input = 0; + } + + /* buffer reset */ + for_each_online_cpu(i) { + reset_hist(&hist_load[i]); + reset_hist_high(&hist_load_high[i]); + } + dbs_tuners_ins.smart_slow_up_load = input; + + return count; +} + +static ssize_t store_smart_slow_up_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int i, input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input < 0) + input = 0; + + /* buffer reset */ + for_each_online_cpu(i) { + reset_hist(&hist_load[i]); + reset_hist_high(&hist_load_high[i]); + } + dbs_tuners_ins.smart_slow_up_freq = input; + + return count; +} + +static ssize_t store_smart_slow_up_dur(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int i, input; + int 
ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input > SUP_SLOW_UP_DUR) { + input = SUP_SLOW_UP_DUR; + } else if (input < 1) { + input = 1; + } + + /* buffer reset */ + for_each_online_cpu(i) { + reset_hist(&hist_load[i]); + reset_hist_high(&hist_load_high[i]); + } + dbs_tuners_ins.smart_slow_up_dur = input; + + return count; +} +static ssize_t store_smart_high_slow_up_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + unsigned int i; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input < 0) + input = 0; + /* buffer reset */ + for_each_online_cpu(i) { + reset_hist(&hist_load[i]); + reset_hist_high(&hist_load_high[i]); + } + dbs_tuners_ins.smart_high_slow_up_freq = input; + + return count; +} +static ssize_t store_smart_high_slow_up_dur(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + unsigned int i; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input > SUP_HIGH_SLOW_UP_DUR ) { + input = SUP_HIGH_SLOW_UP_DUR; + }else if (input < 1 ) { + input = 1; + } + /* buffer reset */ + for_each_online_cpu(i) { + reset_hist(&hist_load[i]); + reset_hist_high(&hist_load_high[i]); + } + dbs_tuners_ins.smart_high_slow_up_dur = input; + + return count; +} +static ssize_t store_smart_each_off(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int i, input; + int ret; + + ret = sscanf(buf, "%u", &input); + if (ret != 1) + return -EINVAL; + if (input > SUP_CORE_NUM) { + input = SUP_CORE_NUM; + } else if (input < 0) { + input = 0; + } + + /* buffer reset */ + for_each_online_cpu(i) { + reset_hist(&hist_load[i]); + reset_hist_high(&hist_load_high[i]); + } + dbs_tuners_ins.smart_each_off = input; + + return count; +} +/* end smart_up */ + +static ssize_t store_freq_step(struct kobject *a, + struct attribute *b, const char *buf, size_t 
count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 100 || + input < 0) { + return -EINVAL; + } + dbs_tuners_ins.freq_step = input; + + return count; +} + +static ssize_t store_step_up_early_hispeed(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > 2265600 || + input < 0) { + return -EINVAL; + } + dbs_tuners_ins.step_up_early_hispeed = input; + + return count; +} + +static ssize_t store_step_up_interim_hispeed(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input > DEF_STEP_UP_INTERIM_HISPEED || + input < 0) { + return -EINVAL; + } + dbs_tuners_ins.step_up_interim_hispeed = input; + + return count; +} + +static ssize_t store_sampling_early_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_early_factor = input; + + return count; +} + +static ssize_t store_sampling_interim_factor(struct kobject *a, + struct attribute *b, const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1 || input < 1) + return -EINVAL; + dbs_tuners_ins.sampling_interim_factor = input; + + return count; +} + +define_one_global_rw(sampling_rate); +define_one_global_rw(io_is_busy); +define_one_global_rw(up_threshold); +define_one_global_rw(down_differential); +define_one_global_rw(sampling_down_factor); +define_one_global_rw(ignore_nice_load); +define_one_global_rw(powersave_bias); +define_one_global_rw(up_threshold_multi_core); +define_one_global_rw(optimal_freq); +define_one_global_rw(up_threshold_any_cpu_load); +define_one_global_rw(sync_freq); +/* 20130711 smart_up */ 
+define_one_global_rw(smart_up); +define_one_global_rw(smart_slow_up_load); +define_one_global_rw(smart_slow_up_freq); +define_one_global_rw(smart_slow_up_dur); +define_one_global_rw(smart_high_slow_up_freq); +define_one_global_rw(smart_high_slow_up_dur); +define_one_global_rw(smart_each_off); +/* end smart_up */ +define_one_global_rw(freq_step); +define_one_global_rw(step_up_early_hispeed); +define_one_global_rw(step_up_interim_hispeed); +define_one_global_rw(sampling_early_factor); +define_one_global_rw(sampling_interim_factor); +define_one_global_rw(two_phase_freq); + +static struct attribute *dbs_attributes[] = { + &sampling_rate_min.attr, + &sampling_rate.attr, + &up_threshold.attr, + &down_differential.attr, + &sampling_down_factor.attr, + &ignore_nice_load.attr, + &powersave_bias.attr, + &io_is_busy.attr, + &up_threshold_multi_core.attr, + &optimal_freq.attr, + &up_threshold_any_cpu_load.attr, + &sync_freq.attr, + /* 20130711 smart_up */ + &smart_up.attr, + &smart_slow_up_load.attr, + &smart_slow_up_freq.attr, + &smart_slow_up_dur.attr, + &smart_high_slow_up_freq.attr, + &smart_high_slow_up_dur.attr, + &smart_each_off.attr, + /* end smart_up */ + &freq_step.attr, + &step_up_early_hispeed.attr, + &step_up_interim_hispeed.attr, + &sampling_early_factor.attr, + &sampling_interim_factor.attr, + &two_phase_freq.attr, + NULL +}; + +static struct attribute_group dbs_attr_group = { + .attrs = dbs_attributes, + .name = "intellidemand", +}; + +/************************** sysfs end ************************/ + + +static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) +{ + if (dbs_tuners_ins.powersave_bias) + freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H); + else if (p->cur == p->max) + return; + + __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? 
+ CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); +} + +static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) +{ + +#if defined(SMART_UP_PLUS) + unsigned int core_j = 0; +#endif + + /* Extrapolated load of this CPU */ + unsigned int load_at_max_freq = 0; + unsigned int max_load_freq; + /* Current load across this CPU */ + unsigned int cur_load = 0; + unsigned int max_load = 0; + unsigned int max_load_other_cpu = 0; + struct cpufreq_policy *policy; + unsigned int j; + static unsigned int phase = 0; + static unsigned int counter = 0; + unsigned int nr_cpus; + + this_dbs_info->freq_lo = 0; + policy = this_dbs_info->cur_policy; + if (policy == NULL) + return; + + /* + * Every sampling_rate, we check, if current idle time is less + * than 20% (default), then we try to increase frequency + * Every sampling_rate, we look for a the lowest + * frequency which can sustain the load while keeping idle time over + * 30%. If such a frequency exist, we try to decrease to this frequency. + * + * Any frequency increase takes it to the maximum frequency. 
+ * Frequency reduction happens at minimum steps of + * 5% (default) of current frequency + */ + + /* Get Absolute Load - in terms of freq */ + max_load_freq = 0; + + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + u64 cur_wall_time, cur_idle_time, cur_iowait_time; + unsigned int idle_time, wall_time, iowait_time; + unsigned int load_freq; + int freq_avg; + + j_dbs_info = &per_cpu(id_cpu_dbs_info, j); + + cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, + dbs_tuners_ins.io_is_busy); + cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); + + wall_time = (unsigned int) + (cur_wall_time - j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_cpu_wall = cur_wall_time; + + idle_time = (unsigned int) + (cur_idle_time - j_dbs_info->prev_cpu_idle); + j_dbs_info->prev_cpu_idle = cur_idle_time; + + iowait_time = (unsigned int) + (cur_iowait_time - j_dbs_info->prev_cpu_iowait); + j_dbs_info->prev_cpu_iowait = cur_iowait_time; + + if (dbs_tuners_ins.ignore_nice) { + u64 cur_nice; + unsigned long cur_nice_jiffies; + + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; + /* + * Assumption: nice time between sampling periods will + * be less than 2^32 jiffies for 32 bit sys + */ + cur_nice_jiffies = (unsigned long) + cputime64_to_jiffies64(cur_nice); + + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + idle_time += jiffies_to_usecs(cur_nice_jiffies); + } + + /* + * For the purpose of intellidemand, waiting for disk IO is an + * indication that you're performance critical, and not that + * the system is actually idle. So subtract the iowait time + * from the cpu idle time. 
+ */ + + if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time) + idle_time -= iowait_time; + + if (unlikely(!wall_time || wall_time < idle_time)) + continue; + + /* + * If the CPU had gone completely idle, and a task just woke up + * on this CPU now, it would be unfair to calculate 'load' the + * usual way for this elapsed time-window, because it will show + * near-zero load, irrespective of how CPU intensive the new + * task is. This is undesirable for latency-sensitive bursty + * workloads. + * + * To avoid this, we reuse the 'load' from the previous + * time-window and give this task a chance to start with a + * reasonably high CPU frequency. + * + * Detecting this situation is easy: the governor's deferrable + * timer would not have fired during CPU-idle periods. Hence + * an unusually large 'wall_time' indicates this scenario. + */ + if (unlikely(wall_time > (2 * dbs_tuners_ins.sampling_rate))) { + cur_load = j_dbs_info->prev_load; + } else { + cur_load = 100 * (wall_time - idle_time) / wall_time; + } + + if (cur_load > max_load) + max_load = cur_load; + + j_dbs_info->max_load = max(cur_load, j_dbs_info->prev_load); + j_dbs_info->prev_load = cur_load; + freq_avg = __cpufreq_driver_getavg(policy, j); + if (policy == NULL) + return; + if (freq_avg <= 0) + freq_avg = policy->cur; + + load_freq = cur_load * freq_avg; + if (load_freq > max_load_freq) + max_load_freq = load_freq; + +#if defined(SMART_UP_PLUS) + max_load = cur_load; + core_j = j; +#endif + + } + + for_each_online_cpu(j) { + struct cpu_dbs_info_s *j_dbs_info; + j_dbs_info = &per_cpu(id_cpu_dbs_info, j); + + if (j == policy->cpu) + continue; + + if (max_load_other_cpu < j_dbs_info->max_load) + max_load_other_cpu = j_dbs_info->max_load; + } + + /* calculate the scaled load across CPU */ + load_at_max_freq = (cur_load * policy->cur)/policy->max; + + cpufreq_notify_utilization(policy, cur_load); + +/* PATCH : SMART_UP */ + if (dbs_tuners_ins.smart_up && (core_j + 1) > + dbs_tuners_ins.smart_each_off) 
{ + if (max_load_freq > SUP_THRESHOLD_STEPS[0] * policy->cur) { + int smart_up_inc = + (policy->max - policy->cur) / SUP_FREQ_STEPS[0]; + int freq_next = 0; + int i = 0; + + /* 20130429 UPDATE */ + int check_idx = 0; + int check_freq = 0; + int temp_up_inc =0; + + if (counter < 5) { + counter++; + if (counter > 2) { + phase = 1; + } + } + + nr_cpus = num_online_cpus(); + dbs_tuners_ins.two_phase_freq = two_phase_freq_array[nr_cpus-1]; + if (dbs_tuners_ins.two_phase_freq < policy->cur) + phase = 1; + if (dbs_tuners_ins.two_phase_freq != 0 && phase == 0) { + dbs_freq_increase(policy, dbs_tuners_ins.two_phase_freq); + } else { + if (policy->cur < policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + dbs_freq_increase(policy, policy->max); + } + + for (i = (SUP_MAX_STEP - 1); i > 0; i--) { + if (max_load_freq > SUP_THRESHOLD_STEPS[i] + * policy->cur) { + smart_up_inc = (policy->max - policy->cur) + / SUP_FREQ_STEPS[i]; + break; + } + } + + /* 20130429 UPDATE */ + check_idx = pre_freq_idx[core_j].freq_idx; + check_freq = pre_freq_idx[core_j].freq_value; + if (( check_idx == 0) + || (this_dbs_info->freq_table[check_idx].frequency + != policy->cur)) { + int i = 0; + for (i =0; i < SUP_FREQ_LEVEL; i ++) { + if (this_dbs_info->freq_table[i].frequency == policy->cur) { + pre_freq_idx[core_j].freq_idx = i; + pre_freq_idx[core_j].freq_value = policy->cur; + check_idx = i; + check_freq = policy->cur; + break; + } + } + } + + if (check_idx < SUP_FREQ_LEVEL-1) { + temp_up_inc = + this_dbs_info->freq_table[check_idx + 1].frequency + - check_freq; + } + + if (smart_up_inc < temp_up_inc ) + smart_up_inc = temp_up_inc; + + freq_next = MIN((policy->cur + smart_up_inc), policy->max); + + if (policy->cur >= dbs_tuners_ins.smart_high_slow_up_freq) { + int idx = hist_load_high[core_j].hist_load_cnt; + int avg_hist_load = 0; + + if (idx >= dbs_tuners_ins.smart_high_slow_up_dur) + idx = 0; + + hist_load_high[core_j].hist_max_load[idx] = max_load; + 
hist_load_high[core_j].hist_load_cnt = idx + 1; + + /* note : check history_load and get_sum_hist_load */ + if (hist_load_high[core_j]. + hist_max_load[dbs_tuners_ins.smart_high_slow_up_dur - 1] > 0) { + int sum_hist_load_freq = 0; + int i = 0; + for (i = 0; i < dbs_tuners_ins.smart_high_slow_up_dur; i++) + sum_hist_load_freq += + hist_load_high[core_j].hist_max_load[i]; + + avg_hist_load = sum_hist_load_freq + / dbs_tuners_ins.smart_high_slow_up_dur; + + if (avg_hist_load > dbs_tuners_ins.smart_slow_up_load) { + reset_hist_high(&hist_load_high[core_j]); + freq_next = MIN((policy->cur + temp_up_inc), policy->max); + } else + freq_next = policy->cur; + } else { + freq_next = policy->cur; + } + + } else if (policy->cur >= dbs_tuners_ins.smart_slow_up_freq ) { + int idx = hist_load[core_j].hist_load_cnt; + int avg_hist_load = 0; + + if (idx >= dbs_tuners_ins.smart_slow_up_dur) + idx = 0; + + hist_load[core_j].hist_max_load[idx] = max_load; + hist_load[core_j].hist_load_cnt = idx + 1; + + /* note : check history_load and get_sum_hist_load */ + if (hist_load[core_j]. 
+ hist_max_load[dbs_tuners_ins.smart_slow_up_dur - 1] > 0) { + int sum_hist_load_freq = 0; + int i = 0; + for (i = 0; i < dbs_tuners_ins.smart_slow_up_dur; i++) + sum_hist_load_freq += + hist_load[core_j].hist_max_load[i]; + + avg_hist_load = sum_hist_load_freq + / dbs_tuners_ins.smart_slow_up_dur; + + if (avg_hist_load > dbs_tuners_ins.smart_slow_up_load) { + reset_hist(&hist_load[core_j]); + freq_next = MIN((policy->cur + temp_up_inc), policy->max); + } else + freq_next = policy->cur; + } else { + freq_next = policy->cur; + } + } else { + reset_hist(&hist_load[core_j]); + } + if (freq_next == policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + + dbs_freq_increase(policy, freq_next); + return; + } + } else { + /* Check for frequency increase */ + if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) { + int target; + int inc; + + if (policy->cur < dbs_tuners_ins.step_up_early_hispeed) { + target = dbs_tuners_ins.step_up_early_hispeed; + } else if (policy->cur < dbs_tuners_ins.step_up_interim_hispeed) { + if (policy->cur == dbs_tuners_ins.step_up_early_hispeed) { + if (this_dbs_info->freq_stay_count < + dbs_tuners_ins.sampling_early_factor) { + this_dbs_info->freq_stay_count++; + return; + } + } + this_dbs_info->freq_stay_count = 1; + inc = (policy->max * dbs_tuners_ins.freq_step) / 100; + target = min(dbs_tuners_ins.step_up_interim_hispeed, + policy->cur + inc); + } else { + if (policy->cur == dbs_tuners_ins.step_up_interim_hispeed) { + if (this_dbs_info->freq_stay_count < + dbs_tuners_ins.sampling_interim_factor) { + this_dbs_info->freq_stay_count++; + return; + } + } + this_dbs_info->freq_stay_count = 1; + target = policy->max; + } + + pr_debug("%s: cpu=%d, cur=%d, target=%d\n", + __func__, policy->cpu, policy->cur, target); + + /* If switching to max speed, apply sampling_down_factor */ + if (target == policy->max) + this_dbs_info->rate_mult = + dbs_tuners_ins.sampling_down_factor; + + dbs_freq_increase(policy, target); + 
return; + } + } + if (counter > 0) { + counter--; + if (counter == 0) { + phase = 0; + } + } + + if (num_online_cpus() > 1) { + if (max_load_other_cpu > + dbs_tuners_ins.up_threshold_any_cpu_load) { + if (policy->cur < dbs_tuners_ins.sync_freq) + dbs_freq_increase(policy, + dbs_tuners_ins.sync_freq); + return; + } + + if (max_load_freq > dbs_tuners_ins.up_threshold_multi_core * + policy->cur) { + if (policy->cur < dbs_tuners_ins.optimal_freq) + dbs_freq_increase(policy, + dbs_tuners_ins.optimal_freq); + return; + } + } + + /* Check for frequency decrease */ + /* if we cannot reduce the frequency anymore, break out early */ + if (policy->cur == policy->min) + return; + + /* + * The optimal frequency is the frequency that is the lowest that + * can support the current CPU usage without triggering the up + * policy. To be safe, we focus 10 points under the threshold. + */ + if (max_load_freq < + (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) * + policy->cur) { + unsigned int freq_next; + freq_next = max_load_freq / + (dbs_tuners_ins.up_threshold - + dbs_tuners_ins.down_differential); + + /* PATCH : SMART_UP */ + if (dbs_tuners_ins.smart_up && (core_j + 1) > + dbs_tuners_ins.smart_each_off) { + if (freq_next >= dbs_tuners_ins.smart_high_slow_up_freq) { + int idx = hist_load_high[core_j].hist_load_cnt; + + if (idx >= dbs_tuners_ins.smart_high_slow_up_dur) + idx = 0; + + hist_load_high[core_j].hist_max_load[idx] = max_load; + hist_load_high[core_j].hist_load_cnt = idx + 1; + } else if (freq_next >= dbs_tuners_ins.smart_slow_up_freq) { + int idx = hist_load[core_j].hist_load_cnt; + + if (idx >= dbs_tuners_ins.smart_slow_up_dur) + idx = 0; + + hist_load[core_j].hist_max_load[idx] = max_load; + hist_load[core_j].hist_load_cnt = idx + 1; + + reset_hist_high(&hist_load_high[core_j]); + } else if (policy->cur >= dbs_tuners_ins.smart_slow_up_freq) { + reset_hist(&hist_load[core_j]); + reset_hist_high(&hist_load_high[core_j]); + } + } + + /* No longer fully 
busy, reset rate_mult */ + this_dbs_info->rate_mult = 1; + this_dbs_info->freq_stay_count = 1; + + if (num_online_cpus() > 1) { + if (max_load_other_cpu > + (dbs_tuners_ins.up_threshold_multi_core - + dbs_tuners_ins.down_differential) && + freq_next < dbs_tuners_ins.sync_freq) + freq_next = dbs_tuners_ins.sync_freq; + + if (max_load_freq > + ((dbs_tuners_ins.up_threshold_multi_core - + dbs_tuners_ins.down_differential_multi_core) * + policy->cur) && + freq_next < dbs_tuners_ins.optimal_freq) + freq_next = dbs_tuners_ins.optimal_freq; + + } + if (!dbs_tuners_ins.powersave_bias) { + __cpufreq_driver_target(policy, freq_next, + CPUFREQ_RELATION_L); + } else { + int freq = powersave_bias_target(policy, freq_next, + CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, freq, + CPUFREQ_RELATION_L); + } + } +} + +static void do_dbs_timer(struct work_struct *work) +{ + struct cpu_dbs_info_s *dbs_info = + container_of(work, struct cpu_dbs_info_s, work.work); + unsigned int cpu = dbs_info->cpu; + int sample_type = dbs_info->sample_type; + int delay; + + mutex_lock(&dbs_info->timer_mutex); + + /* Common NORMAL_SAMPLE setup */ + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + if (!dbs_tuners_ins.powersave_bias || + sample_type == DBS_NORMAL_SAMPLE) { + dbs_check_cpu(dbs_info); + if (dbs_info->freq_lo) { + /* Setup timer for SUB_SAMPLE */ + dbs_info->sample_type = DBS_SUB_SAMPLE; + delay = dbs_info->freq_hi_jiffies; + } else { + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + } + } else { + __cpufreq_driver_target(dbs_info->cur_policy, + dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; + } + queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay); + mutex_unlock(&dbs_info->timer_mutex); +} + +static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) +{ + /* We want all CPUs to do sampling nearly on same jiffy */ + int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); + + if (num_online_cpus() > 1) + 
delay -= jiffies % delay; + + dbs_info->sample_type = DBS_NORMAL_SAMPLE; + INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer); + //queue_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay); + queue_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work.work); + +} + +static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) +{ + cancel_delayed_work_sync(&dbs_info->work); +} + +static void dbs_refresh_callback(struct work_struct *work) +{ + struct cpufreq_policy *policy; + struct cpu_dbs_info_s *this_dbs_info; + struct dbs_work_struct *dbs_work; + unsigned int cpu; + + dbs_work = container_of(work, struct dbs_work_struct, work); + cpu = dbs_work->cpu; + + get_online_cpus(); + + if (lock_policy_rwsem_write(cpu) < 0) + goto bail_acq_sema_failed; + + this_dbs_info = &per_cpu(id_cpu_dbs_info, cpu); + policy = this_dbs_info->cur_policy; + if (!policy) { + /* CPU not using ondemand governor */ + goto bail_incorrect_governor; + } + + if (policy->cur < policy->max) { + /* + * Arch specific cpufreq driver may fail. + * Don't update governor frequency upon failure. 
+ */ + if (__cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_L) >= 0) + policy->cur = policy->max; + + this_dbs_info->prev_cpu_idle = get_cpu_idle_time(cpu, + &this_dbs_info->prev_cpu_wall, dbs_tuners_ins.io_is_busy); + } + +bail_incorrect_governor: + unlock_policy_rwsem_write(cpu); + +bail_acq_sema_failed: + put_online_cpus(); + return; +} + +static int cpufreq_governor_dbs(struct cpufreq_policy *policy, + unsigned int event) +{ + unsigned int cpu = policy->cpu; + struct cpu_dbs_info_s *this_dbs_info; + unsigned int j; + int rc; + + this_dbs_info = &per_cpu(id_cpu_dbs_info, cpu); + + switch (event) { + case CPUFREQ_GOV_START: + if ((!cpu_online(cpu)) || (!policy->cur)) + return -EINVAL; + + mutex_lock(&dbs_mutex); + + dbs_enable++; + for_each_cpu(j, policy->cpus) { + struct cpu_dbs_info_s *j_dbs_info; + u64 tmp; + j_dbs_info = &per_cpu(id_cpu_dbs_info, j); + j_dbs_info->cur_policy = policy; + + j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, + &j_dbs_info->prev_cpu_wall, + dbs_tuners_ins.io_is_busy); + tmp = j_dbs_info->prev_cpu_wall - + j_dbs_info->prev_cpu_idle; + do_div(tmp, j_dbs_info->prev_cpu_wall); + j_dbs_info->prev_load = 100 * tmp; + if (dbs_tuners_ins.ignore_nice) + j_dbs_info->prev_cpu_nice = + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; + } + this_dbs_info->cpu = cpu; + this_dbs_info->rate_mult = 1; + this_dbs_info->freq_stay_count = 1; + intellidemand_powersave_bias_init_cpu(cpu); + /* + * Start the timerschedule work, when this governor + * is used for first time + */ + if (dbs_enable == 1) { + unsigned int latency; + + rc = sysfs_create_group(cpufreq_global_kobject, + &dbs_attr_group); + if (rc) { + dbs_enable--; + mutex_unlock(&dbs_mutex); + return rc; + } + + /* policy latency is in nS. 
Convert it to uS first */ + latency = policy->cpuinfo.transition_latency / 1000; + if (latency == 0) + latency = 1; + /* Bring kernel and HW constraints together */ + min_sampling_rate = max(min_sampling_rate, + MIN_LATENCY_MULTIPLIER * latency); + if (latency != 1) + dbs_tuners_ins.sampling_rate = + max(dbs_tuners_ins.sampling_rate, + latency * LATENCY_MULTIPLIER); + dbs_tuners_ins.io_is_busy = 0; + + if (dbs_tuners_ins.optimal_freq == 0) + dbs_tuners_ins.optimal_freq = policy->min; + + if (dbs_tuners_ins.sync_freq == 0) + dbs_tuners_ins.sync_freq = policy->min; + } + mutex_unlock(&dbs_mutex); + + mutex_init(&this_dbs_info->timer_mutex); + + if (!intellidemand_powersave_bias_setspeed( + this_dbs_info->cur_policy, + NULL, + dbs_tuners_ins.powersave_bias)) + dbs_timer_init(this_dbs_info); + break; + + case CPUFREQ_GOV_STOP: + dbs_timer_exit(this_dbs_info); + + mutex_lock(&dbs_mutex); + mutex_destroy(&this_dbs_info->timer_mutex); + + dbs_enable--; + + /* If device is being removed, policy is no longer + * valid. 
*/ + this_dbs_info->cur_policy = NULL; + if (!dbs_enable) + sysfs_remove_group(cpufreq_global_kobject, + &dbs_attr_group); + + mutex_unlock(&dbs_mutex); + + break; + + case CPUFREQ_GOV_LIMITS: + /* If device is being removed, skip set limits */ + if (!this_dbs_info->cur_policy) + break; + mutex_lock(&this_dbs_info->timer_mutex); + if (policy->max < this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->max, CPUFREQ_RELATION_H); + else if (policy->min > this_dbs_info->cur_policy->cur) + __cpufreq_driver_target(this_dbs_info->cur_policy, + policy->min, CPUFREQ_RELATION_L); + else if (dbs_tuners_ins.powersave_bias != 0) + intellidemand_powersave_bias_setspeed( + this_dbs_info->cur_policy, + policy, + dbs_tuners_ins.powersave_bias); + dbs_check_cpu(this_dbs_info); + mutex_unlock(&this_dbs_info->timer_mutex); + break; + } + return 0; +} + +#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND +static +#endif +struct cpufreq_governor cpufreq_gov_intellidemand = { + .name = "intellidemand", + .governor = cpufreq_governor_dbs, + .max_transition_latency = TRANSITION_LATENCY_LIMIT, + .owner = THIS_MODULE, +}; + +static int __init cpufreq_gov_dbs_init(void) +{ + u64 idle_time; + unsigned int i; + int cpu = get_cpu(); + + idle_time = get_cpu_idle_time_us(cpu, NULL); + put_cpu(); + if (idle_time != -1ULL) { + /* Idle micro accounting is supported. Use finer thresholds */ + dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; + dbs_tuners_ins.down_differential = + MICRO_FREQUENCY_DOWN_DIFFERENTIAL; + /* + * In nohz/micro accounting case we set the minimum frequency + * not depending on HZ, but fixed (very low). The deferred + * timer might skip some samples if idle/sleeping as needed. 
+ */ + min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; + } else { + /* For correct statistics, we need 10 ticks for each measure */ + min_sampling_rate = + MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10); + } + + dbs_wq = alloc_workqueue("intellidemand_dbs_wq", WQ_HIGHPRI, 0); + if (!dbs_wq) { + printk(KERN_ERR "Failed to create intellidemand_dbs_wq workqueue\n"); + return -EFAULT; + } + for_each_possible_cpu(i) { + struct cpu_dbs_info_s *this_dbs_info = + &per_cpu(id_cpu_dbs_info, i); + struct dbs_work_struct *dbs_work = + &per_cpu(dbs_refresh_work, i); + + mutex_init(&this_dbs_info->timer_mutex); + INIT_WORK(&dbs_work->work, dbs_refresh_callback); + dbs_work->cpu = i; + } + + return cpufreq_register_governor(&cpufreq_gov_intellidemand); +} + +static void __exit cpufreq_gov_dbs_exit(void) +{ + unsigned int i; + + cpufreq_unregister_governor(&cpufreq_gov_intellidemand); + for_each_possible_cpu(i) { + struct cpu_dbs_info_s *this_dbs_info = + &per_cpu(id_cpu_dbs_info, i); + mutex_destroy(&this_dbs_info->timer_mutex); + } + destroy_workqueue(dbs_wq); +} + +MODULE_AUTHOR("Venkatesh Pallipadi "); +MODULE_AUTHOR("Alexey Starikovskiy "); +MODULE_AUTHOR("Paul Reioux "); +MODULE_DESCRIPTION("'cpufreq_intellidemand' - A dynamic cpufreq governor for " + "Low Latency Frequency Transition capable processors"); +MODULE_LICENSE("GPL"); + +#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTELLIDEMAND +fs_initcall(cpufreq_gov_dbs_init); +#else +module_init(cpufreq_gov_dbs_init); +#endif +module_exit(cpufreq_gov_dbs_exit); diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c index 888ece68a47..f870bb3a326 100644 --- a/drivers/hid/hid-cherry.c +++ b/drivers/hid/hid-cherry.c @@ -29,7 +29,7 @@ static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { + if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); rdesc[11] = 
rdesc[16] = 0xff; rdesc[12] = rdesc[17] = 0x03; diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index b4f0d8216fd..d7118f8ed34 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -282,7 +282,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, * - change the button usage range to 4-7 for the extra * buttons */ - if (*rsize >= 74 && + if (*rsize >= 75 && rdesc[61] == 0x05 && rdesc[62] == 0x08 && rdesc[63] == 0x19 && rdesc[64] == 0x08 && rdesc[65] == 0x29 && rdesc[66] == 0x0f && diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index e7a7bd1eb34..cb01e67a543 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -111,7 +111,7 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, { unsigned long quirks = (unsigned long)hid_get_drvdata(hdev); - if ((quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 && + if ((quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { hid_info(hdev, "fixing up Logitech keyboard report descriptor\n"); @@ -120,7 +120,7 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, } if ((quirks & LG_RDESC_REL_ABS) && *rsize >= 50 && rdesc[32] == 0x81 && rdesc[33] == 0x06 && - rdesc[49] == 0x81 && rdesc[50] == 0x06) { + rdesc[49] == 0x81 && rdesc[51] == 0x06) { hid_info(hdev, "fixing up rel/abs in Logitech report descriptor\n"); rdesc[33] = rdesc[50] = 0x02; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 3bfd74f1ad4..f009ab2df76 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -230,13 +230,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, return; } - if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || - (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { - dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n", - __func__, dj_report->device_index); - return; - } - if 
(djrcv_dev->paired_dj_devices[dj_report->device_index]) { /* The device is already known. No need to reallocate it. */ dbg_hid("%s: device is already known\n", __func__); @@ -688,7 +681,6 @@ static int logi_dj_raw_event(struct hid_device *hdev, struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_report *dj_report = (struct dj_report *) data; unsigned long flags; - bool report_processed = false; dbg_hid("%s, size:%d\n", __func__, size); @@ -716,27 +708,41 @@ static int logi_dj_raw_event(struct hid_device *hdev, * anything else with it. */ + /* case 1) */ + if (data[0] != REPORT_ID_DJ_SHORT) + return false; + + if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || + (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { + /* + * Device index is wrong, bail out. + * This driver can ignore safely the receiver notifications, + * so ignore those reports too. + */ + if (dj_report->device_index != DJ_RECEIVER_INDEX) + dev_err(&hdev->dev, "%s: invalid device index:%d\n", + __func__, dj_report->device_index); + return false; + } + spin_lock_irqsave(&djrcv_dev->lock, flags); - if (dj_report->report_id == REPORT_ID_DJ_SHORT) { - switch (dj_report->report_type) { - case REPORT_TYPE_NOTIF_DEVICE_PAIRED: - case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: - logi_dj_recv_queue_notification(djrcv_dev, dj_report); - break; - case REPORT_TYPE_NOTIF_CONNECTION_STATUS: - if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == - STATUS_LINKLOSS) { - logi_dj_recv_forward_null_report(djrcv_dev, dj_report); - } - break; - default: - logi_dj_recv_forward_report(djrcv_dev, dj_report); + switch (dj_report->report_type) { + case REPORT_TYPE_NOTIF_DEVICE_PAIRED: + case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: + logi_dj_recv_queue_notification(djrcv_dev, dj_report); + break; + case REPORT_TYPE_NOTIF_CONNECTION_STATUS: + if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == + STATUS_LINKLOSS) { + logi_dj_recv_forward_null_report(djrcv_dev, dj_report); } - report_processed = true; 
+ break; + default: + logi_dj_recv_forward_report(djrcv_dev, dj_report); } spin_unlock_irqrestore(&djrcv_dev->lock, flags); - return report_processed; + return true; } static int logi_dj_probe(struct hid_device *hdev, diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h index 4a4000340ce..daeb0aa4bee 100644 --- a/drivers/hid/hid-logitech-dj.h +++ b/drivers/hid/hid-logitech-dj.h @@ -27,6 +27,7 @@ #define DJ_MAX_PAIRED_DEVICES 6 #define DJ_MAX_NUMBER_NOTIFICATIONS 8 +#define DJ_RECEIVER_INDEX 0 #define DJ_DEVICE_INDEX_MIN 1 #define DJ_DEVICE_INDEX_MAX 6 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 8427463beeb..05643cf9ea1 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -308,6 +308,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, if (size < 4 || ((size - 4) % 9) != 0) return 0; npoints = (size - 4) / 9; + if (npoints > 15) { + hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n", + size); + return 0; + } msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 9 + 4); @@ -331,6 +336,11 @@ static int magicmouse_raw_event(struct hid_device *hdev, if (size < 6 || ((size - 6) % 8) != 0) return 0; npoints = (size - 6) / 8; + if (npoints > 15) { + hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n", + size); + return 0; + } msc->ntouches = 0; for (ii = 0; ii < npoints; ii++) magicmouse_emit_touch(msc, ii, data + ii * 8 + 6); diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c index dedf757781a..eb0271e115c 100644 --- a/drivers/hid/hid-monterey.c +++ b/drivers/hid/hid-monterey.c @@ -25,7 +25,7 @@ static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { + if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); rdesc[30] = 
0x0c; } diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c index f1ea3ff8a98..99f317ac649 100644 --- a/drivers/hid/hid-petalynx.c +++ b/drivers/hid/hid-petalynx.c @@ -26,7 +26,7 @@ static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && + if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && rdesc[41] == 0x00 && rdesc[59] == 0x26 && rdesc[60] == 0xf9 && rdesc[61] == 0x00) { hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n"); diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c index 95f90479f28..4e37b1f4c7e 100644 --- a/drivers/hid/hid-picolcd.c +++ b/drivers/hid/hid-picolcd.c @@ -2370,6 +2370,12 @@ static int picolcd_raw_event(struct hid_device *hdev, if (!data) return 1; + if (size > 64) { + hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n", + size); + return 0; + } + if (report->id == REPORT_KEY_STATE) { if (data->input_keys) ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c index d484a0043dd..3d6ae7bbc25 100644 --- a/drivers/hid/hid-sunplus.c +++ b/drivers/hid/hid-sunplus.c @@ -25,7 +25,7 @@ static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && + if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && rdesc[106] == 0x03) { hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n"); rdesc[105] = rdesc[110] = 0x03; diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index bea0ad478d3..7cde3d6cbd1 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -1000,7 +1000,23 @@ config TOUCHSCREEN_CHARGER_NOTIFY config TOUCHSCREEN_SWEEP2WAKE tristate "Sweep2Wake for touchscreens" + select TOUCHSCREEN_PREVENT_SLEEP default n +config 
TOUCHSCREEN_DOUBLETAP2WAKE + tristate "DoubleTap2Wake for touchscreens" + select TOUCHSCREEN_PREVENT_SLEEP + default n + +config TOUCHSCREEN_PREVENT_SLEEP + bool "Inhibit sleep on modified touchscreen drivers" + default n + help + This disables the sleep function of modified touchscreen drivers. + +config PWRKEY_SUSPEND + tristate "Suspend touch wake if power button pressed" + default y + endif diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 15d02f10fef..f8cf9b07d0f 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -85,4 +85,5 @@ obj-$(CONFIG_TOUCHSCREEN_CYTTSP_I2C_QC) += cyttsp-i2c-qc.o obj-$(CONFIG_TOUCHSCREEN_LGE_COMMON) += lge_touch_core.o obj-$(CONFIG_TOUCHSCREEN_LGE_SYNAPTICS) += touch_synaptics.o obj-$(CONFIG_TOUCHSCREEN_LGE_SYNAPTICS) += touch_synaptics_ds4_fw_upgrade.o +obj-$(CONFIG_TOUCHSCREEN_ELAN_TF_3K) += ektf3k.o obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += DS4/ diff --git a/drivers/input/touchscreen/SynaImage.h b/drivers/input/touchscreen/SynaImage.h index 4ca2bc1376c..c068d4692a8 100644 --- a/drivers/input/touchscreen/SynaImage.h +++ b/drivers/input/touchscreen/SynaImage.h @@ -5926,4 +5926,4 @@ /*b8d0:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*b8e0:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x51, 0x00, /*b8f0:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - /*b900:*/ 0xff}; \ No newline at end of file + /*b900:*/ 0xff}; diff --git a/drivers/input/touchscreen/doubletap2wake.c b/drivers/input/touchscreen/doubletap2wake.c new file mode 100644 index 00000000000..b85ef2b79c0 --- /dev/null +++ b/drivers/input/touchscreen/doubletap2wake.c @@ -0,0 +1,448 @@ +/* + * drivers/input/touchscreen/doubletap2wake.c + * + * + * Copyright (c) 2013, Dennis Rassmann + * + * This program is free software; you can redistribute it 
and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef CONFIG_HAS_EARLYSUSPEND +#include +#else +#include +#endif +#include +#include + +/* uncomment since no touchscreen defines android touch, do that here */ +//#define ANDROID_TOUCH_DECLARED + +/* if Sweep2Wake is compiled it will already have taken care of this */ +#ifdef CONFIG_TOUCHSCREEN_SWEEP2WAKE +#define ANDROID_TOUCH_DECLARED +#endif + +/* Version, author, desc, etc */ +#define DRIVER_AUTHOR "Dennis Rassmann " +#define DRIVER_DESCRIPTION "Doubletap2wake for almost any device" +#define DRIVER_VERSION "1.0" +#define LOGTAG "[doubletap2wake]: " + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESCRIPTION); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPLv2"); + +/* Tuneables */ +#define DT2W_DEBUG 0 +#define DT2W_DEFAULT 0 + +#define DT2W_PWRKEY_DUR 60 +#define DT2W_FEATHER 200 +#define DT2W_TIME 700 + +/* Resources */ +int dt2w_switch = DT2W_DEFAULT; +static cputime64_t tap_time_pre = 0; +static int touch_x = 0, touch_y = 0, touch_nr = 0, x_pre = 0, y_pre = 0; +static bool touch_x_called = false, touch_y_called = false, touch_cnt = true; +static bool scr_suspended = false, exec_count = true; +#ifndef CONFIG_HAS_EARLYSUSPEND +static struct notifier_block 
dt2w_lcd_notif; +#endif +struct input_dev * doubletap2wake_pwrdev; +static DEFINE_MUTEX(pwrkeyworklock); +static struct workqueue_struct *dt2w_input_wq; +static struct work_struct dt2w_input_work; + +/* PowerKey setter */ +void power_on_display_dt2w(struct input_dev *input_device) +{ + doubletap2wake_pwrdev = input_device; +} + +/* Read cmdline for dt2w */ +static int __init read_dt2w_cmdline(char *dt2w) +{ + if (strcmp(dt2w, "1") == 0) { + pr_info("[cmdline_dt2w]: DoubleTap2Wake enabled. | dt2w='%s'\n", dt2w); + dt2w_switch = 1; + } else if (strcmp(dt2w, "0") == 0) { + pr_info("[cmdline_dt2w]: DoubleTap2Wake disabled. | dt2w='%s'\n", dt2w); + dt2w_switch = 0; + } else { + pr_info("[cmdline_dt2w]: No valid input found. Going with default: | dt2w='%u'\n", dt2w_switch); + } + return 1; +} +__setup("dt2w=", read_dt2w_cmdline); + +/* reset on finger release */ +static void doubletap2wake_reset(void) { + exec_count = true; + touch_nr = 0; + tap_time_pre = 0; + x_pre = 0; + y_pre = 0; +} + +/* PowerKey work func */ +static void doubletap2wake_presspwr(struct work_struct * doubletap2wake_presspwr_work) { + if (!mutex_trylock(&pwrkeyworklock)) + return; + input_event(doubletap2wake_pwrdev, EV_KEY, KEY_POWER, 1); + input_event(doubletap2wake_pwrdev, EV_SYN, 0, 0); + msleep(DT2W_PWRKEY_DUR); + input_event(doubletap2wake_pwrdev, EV_KEY, KEY_POWER, 0); + input_event(doubletap2wake_pwrdev, EV_SYN, 0, 0); + msleep(DT2W_PWRKEY_DUR); + mutex_unlock(&pwrkeyworklock); + return; +} +static DECLARE_WORK(doubletap2wake_presspwr_work, doubletap2wake_presspwr); + +/* PowerKey trigger */ +static void doubletap2wake_pwrtrigger(void) { + schedule_work(&doubletap2wake_presspwr_work); + return; +} + +/* unsigned */ +static unsigned int calc_feather(int coord, int prev_coord) { + int calc_coord = 0; + calc_coord = coord-prev_coord; + if (calc_coord < 0) + calc_coord = calc_coord * (-1); + return calc_coord; +} + +/* init a new touch */ +static void new_touch(int x, int y) { + tap_time_pre = 
ktime_to_ms(ktime_get()); + x_pre = x; + y_pre = y; + touch_nr++; +} + +/* Doubletap2wake main function */ +static void detect_doubletap2wake(int x, int y, bool st) +{ + bool single_touch = st; +#if DT2W_DEBUG + pr_info(LOGTAG"x,y(%4d,%4d) single:%s\n", + x, y, (single_touch) ? "true" : "false"); +#endif + if ((single_touch) && (dt2w_switch > 0) && (exec_count) && (touch_cnt)) { + touch_cnt = false; + if (touch_nr == 0) { + new_touch(x, y); + } else if (touch_nr == 1) { + if ((calc_feather(x, x_pre) < DT2W_FEATHER) && + (calc_feather(y, y_pre) < DT2W_FEATHER) && + ((ktime_to_ms(ktime_get())-tap_time_pre) < DT2W_TIME)) + touch_nr++; + else { + doubletap2wake_reset(); + new_touch(x, y); + } + } else { + doubletap2wake_reset(); + new_touch(x, y); + } + if ((touch_nr > 1)) { + pr_info(LOGTAG"ON\n"); + exec_count = false; + doubletap2wake_pwrtrigger(); + doubletap2wake_reset(); + } + } +} + +static void dt2w_input_callback(struct work_struct *unused) { + + detect_doubletap2wake(touch_x, touch_y, true); + + return; +} + +static void dt2w_input_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) { +#if DT2W_DEBUG + pr_info("doubletap2wake: code: %s|%u, val: %i\n", + ((code==ABS_MT_POSITION_X) ? "X" : + (code==ABS_MT_POSITION_Y) ? "Y" : + (code==ABS_MT_TRACKING_ID) ? 
"ID" : + "undef"), code, value); +#endif + if (!scr_suspended) + return; + + if (code == ABS_MT_SLOT) { + doubletap2wake_reset(); + return; + } + + if (code == ABS_MT_TRACKING_ID && value == -1) { + touch_cnt = true; + return; + } + + if (code == ABS_MT_POSITION_X) { + touch_x = value; + touch_x_called = true; + } + + if (code == ABS_MT_POSITION_Y) { + touch_y = value; + touch_y_called = true; + } + + if (touch_x_called || touch_y_called) { + touch_x_called = false; + touch_y_called = false; + queue_work_on(0, dt2w_input_wq, &dt2w_input_work); + } +} + +static int input_dev_filter(struct input_dev *dev) { + if (strstr(dev->name, "touch")) { + return 0; + } else { + return 1; + } +} + +static int dt2w_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) { + struct input_handle *handle; + int error; + + if (input_dev_filter(dev)) + return -ENODEV; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "dt2w"; + + error = input_register_handle(handle); + if (error) + goto err2; + + error = input_open_device(handle); + if (error) + goto err1; + + return 0; +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return error; +} + +static void dt2w_input_disconnect(struct input_handle *handle) { + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id dt2w_ids[] = { + { .driver_info = 1 }, + { }, +}; + +static struct input_handler dt2w_input_handler = { + .event = dt2w_input_event, + .connect = dt2w_input_connect, + .disconnect = dt2w_input_disconnect, + .name = "dt2w_inputreq", + .id_table = dt2w_ids, +}; + +#ifndef CONFIG_HAS_EARLYSUSPEND +static int lcd_notifier_callback(struct notifier_block *this, + unsigned long event, void *data) +{ + switch (event) { + case LCD_EVENT_ON_END: + scr_suspended = false; + break; + case 
LCD_EVENT_OFF_END: + scr_suspended = true; + break; + default: + break; + } + + return 0; +} +#else +static void dt2w_early_suspend(struct early_suspend *h) { + scr_suspended = true; +} + +static void dt2w_late_resume(struct early_suspend *h) { + scr_suspended = false; +} + +static struct early_suspend dt2w_early_suspend_handler = { + .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN, + .suspend = dt2w_early_suspend, + .resume = dt2w_late_resume, +}; +#endif + +/* + * SYSFS stuff below here + */ +static ssize_t dt2w_doubletap2wake_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t count = 0; + + count += sprintf(buf, "%d\n", dt2w_switch); + + return count; +} + +static ssize_t dt2w_doubletap2wake_dump(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + if (buf[0] >= '0' && buf[0] <= '2' && buf[1] == '\n') + if (dt2w_switch != buf[0] - '0') + dt2w_switch = buf[0] - '0'; + + return count; +} + +static DEVICE_ATTR(doubletap2wake, (S_IWUSR|S_IRUGO), + dt2w_doubletap2wake_show, dt2w_doubletap2wake_dump); + +static ssize_t dt2w_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t count = 0; + + count += sprintf(buf, "%s\n", DRIVER_VERSION); + + return count; +} + +static ssize_t dt2w_version_dump(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return count; +} + +static DEVICE_ATTR(doubletap2wake_version, (S_IWUSR|S_IRUGO), + dt2w_version_show, dt2w_version_dump); + +/* + * INIT / EXIT stuff below here + */ +#ifdef ANDROID_TOUCH_DECLARED +extern struct kobject *android_touch_kobj; +#else +struct kobject *android_touch_kobj; +EXPORT_SYMBOL_GPL(android_touch_kobj); +#endif +static int __init doubletap2wake_init(void) +{ + int rc = 0; + + doubletap2wake_pwrdev = input_allocate_device(); + if (!doubletap2wake_pwrdev) { + pr_err("Can't allocate suspend autotest power button\n"); + goto err_alloc_dev; + } + + 
input_set_capability(doubletap2wake_pwrdev, EV_KEY, KEY_POWER); + doubletap2wake_pwrdev->name = "dt2w_pwrkey"; + doubletap2wake_pwrdev->phys = "dt2w_pwrkey/input0"; + + dt2w_input_wq = create_workqueue("dt2wiwq"); + if (!dt2w_input_wq) { + pr_err("%s: Failed to create dt2wiwq workqueue\n", __func__); + return -EFAULT; + } + INIT_WORK(&dt2w_input_work, dt2w_input_callback); + rc = input_register_handler(&dt2w_input_handler); + if (rc) + pr_err("%s: Failed to register dt2w_input_handler\n", __func__); + +#ifndef CONFIG_HAS_EARLYSUSPEND + dt2w_lcd_notif.notifier_call = lcd_notifier_callback; + if (lcd_register_client(&dt2w_lcd_notif) != 0) { + pr_err("%s: Failed to register lcd callback\n", __func__); + } +#else + register_early_suspend(&dt2w_early_suspend_handler); +#endif + +#ifndef ANDROID_TOUCH_DECLARED + android_touch_kobj = kobject_create_and_add("android_touch", NULL) ; + if (android_touch_kobj == NULL) { + pr_warn("%s: android_touch_kobj create_and_add failed\n", __func__); + } +#endif + rc = sysfs_create_file(android_touch_kobj, &dev_attr_doubletap2wake.attr); + if (rc) { + pr_warn("%s: sysfs_create_file failed for doubletap2wake\n", __func__); + } + rc = sysfs_create_file(android_touch_kobj, &dev_attr_doubletap2wake_version.attr); + if (rc) { + pr_warn("%s: sysfs_create_file failed for doubletap2wake_version\n", __func__); + } + +err_alloc_dev: + pr_info(LOGTAG"%s done\n", __func__); + + return 0; +} + +static void __exit doubletap2wake_exit(void) +{ +#ifndef ANDROID_TOUCH_DECLARED + kobject_del(android_touch_kobj); +#endif +#ifndef CONFIG_HAS_EARLYSUSPEND + lcd_unregister_client(&dt2w_lcd_notif); +#endif + input_unregister_handler(&dt2w_input_handler); + destroy_workqueue(dt2w_input_wq); + input_unregister_device(doubletap2wake_pwrdev); + input_free_device(doubletap2wake_pwrdev); + return; +} + +module_init(doubletap2wake_init); +module_exit(doubletap2wake_exit); + diff --git a/drivers/input/touchscreen/lge_touch_core.c 
b/drivers/input/touchscreen/lge_touch_core.c index 3e632425b6f..60468e6ce32 100644 --- a/drivers/input/touchscreen/lge_touch_core.c +++ b/drivers/input/touchscreen/lge_touch_core.c @@ -36,6 +36,15 @@ #include +#ifdef CONFIG_TOUCHSCREEN_PREVENT_SLEEP +#ifdef CONFIG_TOUCHSCREEN_SWEEP2WAKE +#include +#endif +#ifdef CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE +#include +#endif +#endif + #ifdef CUST_G_TOUCH #include "./DS4/RefCode.h" #include "./DS4/RefCode_PDTScan.h" @@ -540,6 +549,10 @@ void* get_touch_handle(struct i2c_client *client) */ int touch_i2c_read(struct i2c_client *client, u8 reg, int len, u8 *buf) { + +#define SYNAPTICS_I2C_RETRY 10 + int retry = 0; + struct i2c_msg msgs[] = { { .addr = client->addr, @@ -555,12 +568,18 @@ int touch_i2c_read(struct i2c_client *client, u8 reg, int len, u8 *buf) }, }; - if (i2c_transfer(client->adapter, msgs, 2) < 0) { - if (printk_ratelimit()) - TOUCH_ERR_MSG("transfer error\n"); - return -EIO; - } else - return 0; + for (retry = 0; retry <= SYNAPTICS_I2C_RETRY; retry++) { + if (i2c_transfer(client->adapter, msgs, 2) == 2) + break; + + if (retry == SYNAPTICS_I2C_RETRY) { + if (printk_ratelimit()) + TOUCH_ERR_MSG("transfer error\n"); + return -EIO; + } else + msleep(10); + } + return 0; } int touch_i2c_write(struct i2c_client *client, u8 reg, int len, u8 * buf) @@ -2229,8 +2248,6 @@ static void touch_fw_upgrade_func(struct work_struct *work_fw_upgrade) else hrtimer_start(&ts->timer, ktime_set(0, ts->pdata->role->report_period), HRTIMER_MODE_REL); - msleep(ts->pdata->role->booting_delay); - touch_ic_init(ts); if(saved_state == POWER_WAKE || saved_state == POWER_SLEEP) @@ -3459,7 +3476,12 @@ static int touch_probe(struct i2c_client *client, const struct i2c_device_id *id ret = request_threaded_irq(client->irq, touch_irq_handler, touch_thread_irq_handler, - ts->pdata->role->irqflags | IRQF_ONESHOT, client->name, ts); +#ifdef CONFIG_TOUCHSCREEN_PREVENT_SLEEP + ts->pdata->role->irqflags | IRQF_ONESHOT | IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, 
+#else + ts->pdata->role->irqflags | IRQF_ONESHOT, +#endif + client->name, ts); if (ret < 0) { TOUCH_ERR_MSG("request_irq failed. use polling mode\n"); @@ -3508,9 +3530,9 @@ static int touch_probe(struct i2c_client *client, const struct i2c_device_id *id ts->accuracy_filter.ignore_pressure_gap = 5; ts->accuracy_filter.delta_max = 30; ts->accuracy_filter.max_pressure = 255; - ts->accuracy_filter.time_to_max_pressure = one_sec / 20; - ts->accuracy_filter.direction_count = one_sec / 6; - ts->accuracy_filter.touch_max_count = one_sec / 2; + ts->accuracy_filter.time_to_max_pressure = one_sec / 25; + ts->accuracy_filter.direction_count = one_sec / 8; + ts->accuracy_filter.touch_max_count = one_sec / 3; } #if defined(CONFIG_HAS_EARLYSUSPEND) @@ -3620,8 +3642,18 @@ static int touch_remove(struct i2c_client *client) static void touch_early_suspend(struct early_suspend *h) { struct lge_touch_data *ts = - container_of(h, struct lge_touch_data, early_suspend); - + container_of(h, struct lge_touch_data, early_suspend); +#ifdef CONFIG_TOUCHSCREEN_PREVENT_SLEEP +#if defined(CONFIG_TOUCHSCREEN_SWEEP2WAKE) || defined(CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE) + bool prevent_sleep = false; +#endif +#if defined(CONFIG_TOUCHSCREEN_SWEEP2WAKE) + prevent_sleep = (s2w_switch > 0) && (s2w_s2sonly == 0); +#endif +#if defined(CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE) + prevent_sleep = prevent_sleep || (dt2w_switch > 0); +#endif +#endif if (unlikely(touch_debug_mask & DEBUG_TRACE)) TOUCH_DEBUG_MSG("\n"); @@ -3636,10 +3668,17 @@ static void touch_early_suspend(struct early_suspend *h) } #endif - if (ts->pdata->role->operation_mode) - disable_irq(ts->client->irq); - else - hrtimer_cancel(&ts->timer); +#ifdef CONFIG_TOUCHSCREEN_PREVENT_SLEEP + if (prevent_sleep) { + enable_irq_wake(ts->client->irq); + release_all_ts_event(ts); + } else +#endif + { + if (ts->pdata->role->operation_mode == INTERRUPT_MODE) + disable_irq(ts->client->irq); + else + hrtimer_cancel(&ts->timer); #ifdef CUST_G_TOUCH if 
(ts->pdata->role->ghost_detection_enable) { hrtimer_cancel(&hr_touch_trigger_timer); @@ -3654,13 +3693,24 @@ static void touch_early_suspend(struct early_suspend *h) release_all_ts_event(ts); touch_power_cntl(ts, ts->pdata->role->suspend_pwr); + } } static void touch_late_resume(struct early_suspend *h) { struct lge_touch_data *ts = container_of(h, struct lge_touch_data, early_suspend); - +#ifdef CONFIG_TOUCHSCREEN_PREVENT_SLEEP +#if defined(CONFIG_TOUCHSCREEN_SWEEP2WAKE) || defined(CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE) + bool prevent_sleep = false; +#endif +#if defined(CONFIG_TOUCHSCREEN_SWEEP2WAKE) + prevent_sleep = (s2w_switch > 0) && (s2w_s2sonly == 0); +#endif +#if defined(CONFIG_TOUCHSCREEN_DOUBLETAP2WAKE) + prevent_sleep = prevent_sleep || (dt2w_switch > 0); +#endif +#endif if (unlikely(touch_debug_mask & DEBUG_TRACE)) TOUCH_DEBUG_MSG("\n"); @@ -3669,7 +3719,8 @@ static void touch_late_resume(struct early_suspend *h) return; } - touch_power_cntl(ts, ts->pdata->role->resume_pwr); +// touch_power_cntl(ts, ts->pdata->role->resume_pwr); + #ifdef CUST_G_TOUCH if (ts->pdata->role->ghost_detection_enable) { resume_flag = 1; @@ -3677,16 +3728,26 @@ static void touch_late_resume(struct early_suspend *h) } #endif - if (ts->pdata->role->operation_mode) - enable_irq(ts->client->irq); - else - hrtimer_start(&ts->timer, ktime_set(0, ts->pdata->role->report_period), HRTIMER_MODE_REL); - - if (ts->pdata->role->resume_pwr == POWER_ON) - queue_delayed_work(touch_wq, &ts->work_init, - msecs_to_jiffies(ts->pdata->role->booting_delay)); - else - queue_delayed_work(touch_wq, &ts->work_init, 0); +#ifdef CONFIG_TOUCHSCREEN_PREVENT_SLEEP + if (prevent_sleep) + disable_irq_wake(ts->client->irq); + else +#endif + { + touch_power_cntl(ts, ts->pdata->role->resume_pwr); + if (ts->pdata->role->operation_mode == INTERRUPT_MODE) + enable_irq(ts->client->irq); + else + hrtimer_start(&ts->timer, + ktime_set(0, ts->pdata->role->report_period), + HRTIMER_MODE_REL); + + if 
(ts->pdata->role->resume_pwr == POWER_ON) + queue_delayed_work(touch_wq, &ts->work_init, + msecs_to_jiffies(ts->pdata->role->booting_delay)); + else + queue_delayed_work(touch_wq, &ts->work_init, 0); + } } #endif diff --git a/drivers/input/touchscreen/sweep2wake.c b/drivers/input/touchscreen/sweep2wake.c new file mode 100644 index 00000000000..f0c220f70ff --- /dev/null +++ b/drivers/input/touchscreen/sweep2wake.c @@ -0,0 +1,502 @@ +/* + * drivers/input/touchscreen/sweep2wake.c + * + * + * Copyright (c) 2013, Dennis Rassmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef CONFIG_HAS_EARLYSUSPEND +#include +#else +#include +#endif +#include + +/* uncomment since no touchscreen defines android touch, do that here */ +//#define ANDROID_TOUCH_DECLARED + +/* Version, author, desc, etc */ +#define DRIVER_AUTHOR "Dennis Rassmann " +#define DRIVER_DESCRIPTION "Sweep2wake for almost any device" +#define DRIVER_VERSION "1.5" +#define LOGTAG "[sweep2wake]: " + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESCRIPTION); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPLv2"); + +/* Tuneables */ +#define S2W_DEBUG 0 +#define S2W_DEFAULT 0 +#define S2W_S2SONLY_DEFAULT 0 +#define S2W_PWRKEY_DUR 60 + +#ifdef CONFIG_MACH_MSM8974_HAMMERHEAD +/* Hammerhead aka Nexus 5 */ +#define S2W_Y_MAX 1920 +#define S2W_X_MAX 1080 +#define S2W_Y_LIMIT S2W_Y_MAX-130 +#define S2W_X_B1 400 +#define S2W_X_B2 700 +#define S2W_X_FINAL 250 +#else +/* defaults */ +#define S2W_Y_LIMIT 2350 +#define S2W_X_MAX 1540 +#define S2W_X_B1 500 +#define S2W_X_B2 1000 +#define S2W_X_FINAL 300 +#endif + + +/* Resources */ +int s2w_switch = S2W_DEFAULT, s2w_s2sonly = S2W_S2SONLY_DEFAULT; +static int touch_x = 0, touch_y = 0; +static bool touch_x_called = false, touch_y_called = false; +static bool scr_suspended = false, exec_count = true; +static bool scr_on_touch = false, barrier[2] = {false, false}; +#ifndef CONFIG_HAS_EARLYSUSPEND +static struct notifier_block s2w_lcd_notif; +#endif +static struct input_dev * sweep2wake_pwrdev; +static DEFINE_MUTEX(pwrkeyworklock); +static struct workqueue_struct *s2w_input_wq; +static struct work_struct s2w_input_work; + +/* Read cmdline for s2w */ +static int __init read_s2w_cmdline(char *s2w) +{ + if (strcmp(s2w, "1") == 0) { + pr_info("[cmdline_s2w]: Sweep2Wake enabled. | s2w='%s'\n", s2w); + s2w_switch = 1; + } else if (strcmp(s2w, "0") == 0) { + pr_info("[cmdline_s2w]: Sweep2Wake disabled. 
| s2w='%s'\n", s2w); + s2w_switch = 0; + } else { + pr_info("[cmdline_s2w]: No valid input found. Going with default: | s2w='%u'\n", s2w_switch); + } + return 1; +} +__setup("s2w=", read_s2w_cmdline); + +/* PowerKey work func */ +static void sweep2wake_presspwr(struct work_struct * sweep2wake_presspwr_work) { + if (!mutex_trylock(&pwrkeyworklock)) + return; + input_event(sweep2wake_pwrdev, EV_KEY, KEY_POWER, 1); + input_event(sweep2wake_pwrdev, EV_SYN, 0, 0); + msleep(S2W_PWRKEY_DUR); + input_event(sweep2wake_pwrdev, EV_KEY, KEY_POWER, 0); + input_event(sweep2wake_pwrdev, EV_SYN, 0, 0); + msleep(S2W_PWRKEY_DUR); + mutex_unlock(&pwrkeyworklock); + return; +} +static DECLARE_WORK(sweep2wake_presspwr_work, sweep2wake_presspwr); + +/* PowerKey trigger */ +static void sweep2wake_pwrtrigger(void) { + schedule_work(&sweep2wake_presspwr_work); + return; +} + +/* reset on finger release */ +static void sweep2wake_reset(void) { + exec_count = true; + barrier[0] = false; + barrier[1] = false; + scr_on_touch = false; +} + +/* Sweep2wake main function */ +static void detect_sweep2wake(int x, int y, bool st) +{ + int prevx = 0, nextx = 0; + bool single_touch = st; +#if S2W_DEBUG + pr_info(LOGTAG"x,y(%4d,%4d) single:%s\n", + x, y, (single_touch) ? 
"true" : "false"); +#endif + //left->right + if ((single_touch) && (scr_suspended == true) && (s2w_switch > 0)) { + prevx = 0; + nextx = S2W_X_B1; + if ((barrier[0] == true) || + ((x > prevx) && + (x < nextx) && + (y > 0))) { + prevx = nextx; + nextx = S2W_X_B2; + barrier[0] = true; + if ((barrier[1] == true) || + ((x > prevx) && + (x < nextx) && + (y > 0))) { + prevx = nextx; + barrier[1] = true; + if ((x > prevx) && + (y > 0)) { + if (x > (S2W_X_MAX - S2W_X_FINAL)) { + if (exec_count) { + pr_info(LOGTAG"ON\n"); + sweep2wake_pwrtrigger(); + exec_count = false; + } + } + } + } + } + //right->left + } else if ((single_touch) && (scr_suspended == false) && (s2w_switch > 0)) { + scr_on_touch=true; + prevx = (S2W_X_MAX - S2W_X_FINAL); + nextx = S2W_X_B2; + if ((barrier[0] == true) || + ((x < prevx) && + (x > nextx) && + (y > S2W_Y_LIMIT))) { + prevx = nextx; + nextx = S2W_X_B1; + barrier[0] = true; + if ((barrier[1] == true) || + ((x < prevx) && + (x > nextx) && + (y > S2W_Y_LIMIT))) { + prevx = nextx; + barrier[1] = true; + if ((x < prevx) && + (y > S2W_Y_LIMIT)) { + if (x < S2W_X_FINAL) { + if (exec_count) { + pr_info(LOGTAG"OFF\n"); + sweep2wake_pwrtrigger(); + exec_count = false; + } + } + } + } + } + } +} + +static void s2w_input_callback(struct work_struct *unused) { + + detect_sweep2wake(touch_x, touch_y, true); + + return; +} + +static void s2w_input_event(struct input_handle *handle, unsigned int type, + unsigned int code, int value) { +#if S2W_DEBUG + pr_info("sweep2wake: code: %s|%u, val: %i\n", + ((code==ABS_MT_POSITION_X) ? "X" : + (code==ABS_MT_POSITION_Y) ? "Y" : + (code==ABS_MT_TRACKING_ID) ? 
"ID" : + "undef"), code, value); +#endif + if (code == ABS_MT_SLOT) { + sweep2wake_reset(); + return; + } + + if (code == ABS_MT_TRACKING_ID && value == -1) { + sweep2wake_reset(); + return; + } + + if (code == ABS_MT_POSITION_X) { + touch_x = value; + touch_x_called = true; + } + + if (code == ABS_MT_POSITION_Y) { + touch_y = value; + touch_y_called = true; + } + + if (touch_x_called && touch_y_called) { + touch_x_called = false; + touch_y_called = false; + queue_work_on(0, s2w_input_wq, &s2w_input_work); + } +} + +static int input_dev_filter(struct input_dev *dev) { + if (strstr(dev->name, "touch")) { + return 0; + } else { + return 1; + } +} + +static int s2w_input_connect(struct input_handler *handler, + struct input_dev *dev, const struct input_device_id *id) { + struct input_handle *handle; + int error; + + if (input_dev_filter(dev)) + return -ENODEV; + + handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); + if (!handle) + return -ENOMEM; + + handle->dev = dev; + handle->handler = handler; + handle->name = "s2w"; + + error = input_register_handle(handle); + if (error) + goto err2; + + error = input_open_device(handle); + if (error) + goto err1; + + return 0; +err1: + input_unregister_handle(handle); +err2: + kfree(handle); + return error; +} + +static void s2w_input_disconnect(struct input_handle *handle) { + input_close_device(handle); + input_unregister_handle(handle); + kfree(handle); +} + +static const struct input_device_id s2w_ids[] = { + { .driver_info = 1 }, + { }, +}; + +static struct input_handler s2w_input_handler = { + .event = s2w_input_event, + .connect = s2w_input_connect, + .disconnect = s2w_input_disconnect, + .name = "s2w_inputreq", + .id_table = s2w_ids, +}; + +#ifndef CONFIG_HAS_EARLYSUSPEND +static int lcd_notifier_callback(struct notifier_block *this, + unsigned long event, void *data) +{ + switch (event) { + case LCD_EVENT_ON_END: + scr_suspended = false; + break; + case LCD_EVENT_OFF_END: + scr_suspended = true; + break; + 
default: + break; + } + + return 0; +} +#else +static void s2w_early_suspend(struct early_suspend *h) { + scr_suspended = true; +} + +static void s2w_late_resume(struct early_suspend *h) { + scr_suspended = false; +} + +static struct early_suspend s2w_early_suspend_handler = { + .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN, + .suspend = s2w_early_suspend, + .resume = s2w_late_resume, +}; +#endif + +/* + * SYSFS stuff below here + */ +static ssize_t s2w_sweep2wake_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t count = 0; + + count += sprintf(buf, "%d\n", s2w_switch); + + return count; +} + +static ssize_t s2w_sweep2wake_dump(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + if (buf[0] >= '0' && buf[0] <= '1' && buf[1] == '\n') + if (s2w_switch != buf[0] - '0') + s2w_switch = buf[0] - '0'; + + return count; +} + +static DEVICE_ATTR(sweep2wake, (S_IWUSR|S_IRUGO), + s2w_sweep2wake_show, s2w_sweep2wake_dump); + +static ssize_t s2w_s2w_s2sonly_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t count = 0; + + count += sprintf(buf, "%d\n", s2w_s2sonly); + + return count; +} + +static ssize_t s2w_s2w_s2sonly_dump(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + if (buf[0] >= '0' && buf[0] <= '1' && buf[1] == '\n') + if (s2w_s2sonly != buf[0] - '0') + s2w_s2sonly = buf[0] - '0'; + + return count; +} + +static DEVICE_ATTR(s2w_s2sonly, (S_IWUSR|S_IRUGO), + s2w_s2w_s2sonly_show, s2w_s2w_s2sonly_dump); + +static ssize_t s2w_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + size_t count = 0; + + count += sprintf(buf, "%s\n", DRIVER_VERSION); + + return count; +} + +static ssize_t s2w_version_dump(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return count; +} + +static DEVICE_ATTR(sweep2wake_version, (S_IWUSR|S_IRUGO), + s2w_version_show, s2w_version_dump); + +/* + * 
INIT / EXIT stuff below here + */ +#ifdef ANDROID_TOUCH_DECLARED +extern struct kobject *android_touch_kobj; +#else +struct kobject *android_touch_kobj; +EXPORT_SYMBOL_GPL(android_touch_kobj); +#endif +static int __init sweep2wake_init(void) +{ + int rc = 0; + + sweep2wake_pwrdev = input_allocate_device(); + if (!sweep2wake_pwrdev) { + pr_err("Can't allocate suspend autotest power button\n"); + goto err_alloc_dev; + } + + input_set_capability(sweep2wake_pwrdev, EV_KEY, KEY_POWER); + sweep2wake_pwrdev->name = "s2w_pwrkey"; + sweep2wake_pwrdev->phys = "s2w_pwrkey/input0"; + + rc = input_register_device(sweep2wake_pwrdev); + if (rc) { + pr_err("%s: input_register_device err=%d\n", __func__, rc); + goto err_input_dev; + } + + s2w_input_wq = create_workqueue("s2wiwq"); + if (!s2w_input_wq) { + pr_err("%s: Failed to create s2wiwq workqueue\n", __func__); + return -EFAULT; + } + INIT_WORK(&s2w_input_work, s2w_input_callback); + rc = input_register_handler(&s2w_input_handler); + if (rc) + pr_err("%s: Failed to register s2w_input_handler\n", __func__); + +#ifndef CONFIG_HAS_EARLYSUSPEND + s2w_lcd_notif.notifier_call = lcd_notifier_callback; + if (lcd_register_client(&s2w_lcd_notif) != 0) { + pr_err("%s: Failed to register lcd callback\n", __func__); + } +#else + register_early_suspend(&s2w_early_suspend_handler); +#endif + +#ifndef ANDROID_TOUCH_DECLARED + android_touch_kobj = kobject_create_and_add("android_touch", NULL) ; + if (android_touch_kobj == NULL) { + pr_warn("%s: android_touch_kobj create_and_add failed\n", __func__); + } +#endif + rc = sysfs_create_file(android_touch_kobj, &dev_attr_sweep2wake.attr); + if (rc) { + pr_warn("%s: sysfs_create_file failed for sweep2wake\n", __func__); + } + rc = sysfs_create_file(android_touch_kobj, &dev_attr_s2w_s2sonly.attr); + if (rc) { + pr_warn("%s: sysfs_create_file failed for s2w_s2sonly\n", __func__); + } + rc = sysfs_create_file(android_touch_kobj, &dev_attr_sweep2wake_version.attr); + if (rc) { + pr_warn("%s: 
sysfs_create_file failed for sweep2wake_version\n", __func__); + } + +err_input_dev: + input_free_device(sweep2wake_pwrdev); +err_alloc_dev: + pr_info(LOGTAG"%s done\n", __func__); + + return 0; +} + +static void __exit sweep2wake_exit(void) +{ +#ifndef ANDROID_TOUCH_DECLARED + kobject_del(android_touch_kobj); +#endif +#ifndef CONFIG_HAS_EARLYSUSPEND + lcd_unregister_client(&s2w_lcd_notif); +#endif + input_unregister_handler(&s2w_input_handler); + destroy_workqueue(s2w_input_wq); + input_unregister_device(sweep2wake_pwrdev); + input_free_device(sweep2wake_pwrdev); + return; +} + +module_init(sweep2wake_init); +module_exit(sweep2wake_exit); + diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c0536eda58e..a55353c37b3 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3028,14 +3028,16 @@ int __init amd_iommu_init_dma_ops(void) static void cleanup_domain(struct protection_domain *domain) { - struct iommu_dev_data *dev_data, *next; + struct iommu_dev_data *entry; unsigned long flags; write_lock_irqsave(&amd_iommu_devtable_lock, flags); - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { - __detach_device(dev_data); - atomic_set(&dev_data->bind, 0); + while (!list_empty(&domain->dev_list)) { + entry = list_first_entry(&domain->dev_list, + struct iommu_dev_data, list); + __detach_device(entry); + atomic_set(&entry->bind, 0); } write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); diff --git a/drivers/leds/leds-pm8xxx.c b/drivers/leds/leds-pm8xxx.c old mode 100755 new mode 100644 index ef56311d7e6..cdf02fdd3eb --- a/drivers/leds/leds-pm8xxx.c +++ b/drivers/leds/leds-pm8xxx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -20,12 +20,12 @@ #include #include #include +#include #include #include #include - #define SSBI_REG_ADDR_DRV_KEYPAD 0x48 #define PM8XXX_DRV_KEYPAD_BL_MASK 0xf0 #define PM8XXX_DRV_KEYPAD_BL_SHIFT 0x04 @@ -52,6 +52,7 @@ #define WLED_SYNC_REG SSBI_REG_ADDR_WLED_CTRL(11) #define WLED_OVP_CFG_REG SSBI_REG_ADDR_WLED_CTRL(13) #define WLED_BOOST_CFG_REG SSBI_REG_ADDR_WLED_CTRL(14) +#define WLED_RESISTOR_COMPENSATION_CFG_REG SSBI_REG_ADDR_WLED_CTRL(15) #define WLED_HIGH_POLE_CAP_REG SSBI_REG_ADDR_WLED_CTRL(16) #define WLED_STRINGS 0x03 @@ -76,9 +77,14 @@ #define WLED_MAX_LEVEL 255 #define WLED_8_BIT_MASK 0xFF +#define WLED_4_BIT_MASK 0x0F #define WLED_8_BIT_SHFT 0x08 +#define WLED_4_BIT_SHFT 0x04 #define WLED_MAX_DUTY_CYCLE 0xFFF +#define WLED_RESISTOR_COMPENSATION_DEFAULT 20 +#define WLED_RESISTOR_COMPENSATION_MAX 320 + #define WLED_SYNC_VAL 0x07 #define WLED_SYNC_RESET_VAL 0x00 #define WLED_SYNC_MASK 0xF8 @@ -117,7 +123,15 @@ #define PM8XXX_LED_OFFSET(id) ((id) - PM8XXX_ID_LED_0) +#ifdef CONFIG_LGE_PM_PWM_LED #define PM8XXX_LED_PWM_FLAGS (PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP) +#else +#define PM8XXX_LED_PWM_FLAGS (PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP | \ + PM_PWM_LUT_PAUSE_LO_EN | PM_PWM_LUT_PAUSE_HI_EN) +#endif + +#define PM8XXX_LED_PWM_GRPFREQ_MAX 255 +#define PM8XXX_LED_PWM_GRPPWM_MAX 255 #define LED_MAP(_version, _kb, _led0, _led1, _led2, _flash_led0, _flash_led1, \ _wled, _rgb_led_red, _rgb_led_green, _rgb_led_blue)\ @@ -134,6 +148,10 @@ _rgb_led_blue << PM8XXX_ID_RGB_LED_BLUE, \ } +#define PM8XXX_PWM_CURRENT_4MA 4 +#define PM8XXX_PWM_CURRENT_8MA 8 +#define PM8XXX_PWM_CURRENT_12MA 12 + /** * supported_leds - leds supported for each PMIC version * @version - version of PMIC @@ -164,12 +182,15 @@ static const struct supported_leds led_map[] = { * @pwm_channel - PWM channel ID * @pwm_period_us - PWM period in micro seconds * 
@pwm_duty_cycles - struct that describes PWM duty cycles info + * @use_pwm - controlled by userspace */ struct pm8xxx_led_data { struct led_classdev cdev; int id; u8 reg; u8 wled_mod_ctrl_val; + u8 lock_update; + u8 blink; struct device *dev; struct work_struct work; struct mutex lock; @@ -179,23 +200,19 @@ struct pm8xxx_led_data { struct pm8xxx_pwm_duty_cycles *pwm_duty_cycles; struct wled_config_data *wled_cfg; int max_current; + int use_pwm; + int adjust_brightness; + u16 pwm_grppwm; + u16 pwm_grpfreq; + u16 pwm_pause_hi; + u16 pwm_pause_lo; }; -/* Debug mask value - * usage: echo [debug_mask] > /sys/module/leds_pm8xxx/parameters/debug_mask - */ -u32 keyled_debug_mask = DEBUG_LED_NONE; -module_param_named(debug_mask, keyled_debug_mask, int, S_IRUGO|S_IWUSR|S_IWGRP); - - static void led_kp_set(struct pm8xxx_led_data *led, enum led_brightness value) { int rc; u8 level; - if (likely(keyled_debug_mask & DEBUG_LED_TRACE)) - printk("[leds-pm8xxx] led_kp_set \n"); - level = (value << PM8XXX_DRV_KEYPAD_BL_SHIFT) & PM8XXX_DRV_KEYPAD_BL_MASK; @@ -214,11 +231,9 @@ static void led_lc_set(struct pm8xxx_led_data *led, enum led_brightness value) int rc, offset; u8 level; - if (likely(keyled_debug_mask & DEBUG_LED_TRACE)) - printk("[leds-pm8xxx] led_lc_set \n"); - level = (value << PM8XXX_DRV_LED_CTRL_SHIFT) & PM8XXX_DRV_LED_CTRL_MASK; + offset = PM8XXX_LED_OFFSET(led->id); led->reg &= ~PM8XXX_DRV_LED_CTRL_MASK; @@ -226,11 +241,6 @@ static void led_lc_set(struct pm8xxx_led_data *led, enum led_brightness value) rc = pm8xxx_writeb(led->dev->parent, SSBI_REG_ADDR_LED_CTRL(offset), led->reg); - - if (likely(keyled_debug_mask & DEBUG_LED_REG)) - printk("[leds-pm8xxx] led_lc_set (0x%x : %d), level : %d\n", - SSBI_REG_ADDR_LED_CTRL(offset), led->reg, level); - if (rc) dev_err(led->cdev.dev, "can't set (%d) led value rc=%d\n", led->id, rc); @@ -362,8 +372,7 @@ led_rgb_write(struct pm8xxx_led_data *led, u16 addr, enum led_brightness value) { int rc; u8 val, mask; - if 
(likely(keyled_debug_mask & DEBUG_LED_TRACE)) - printk("[leds-pm8xxx] led_rgb_write\n"); + if (led->id != PM8XXX_ID_RGB_LED_BLUE && led->id != PM8XXX_ID_RGB_LED_RED && led->id != PM8XXX_ID_RGB_LED_GREEN) @@ -413,15 +422,171 @@ led_rgb_set(struct pm8xxx_led_data *led, enum led_brightness value) } } +static int pm8xxx_adjust_brightness(struct led_classdev *led_cdev, + enum led_brightness value) +{ + int level = 0; + struct pm8xxx_led_data *led; + led = container_of(led_cdev, struct pm8xxx_led_data, cdev); + + if (!led->adjust_brightness || !led->cdev.max_brightness) + return value; + + if (led->adjust_brightness == led->cdev.max_brightness) + return value; + + level = (2 * value * led->adjust_brightness + + led->cdev.max_brightness) + / (2 * led->cdev.max_brightness); + + if (!level && value) + level = 1; + + return level; +} + +static int pm8xxx_led_pwm_pattern_update(struct pm8xxx_led_data * led) +{ +#ifdef CONFIG_LGE_PM_PWM_LED + int start_idx, idx_len0, idx_len1; + int rc=0; +#else + int start_idx, idx_len; + int *pcts = NULL; + int i, rc = 0; + int temp = 0; + int pwm_max = 0; + int total_ms, on_ms; + int flags; +#endif + +#ifdef CONFIG_LGE_PM_PWM_LED + if (!led->pwm_duty_cycles || !led->pwm_duty_cycles->duty_pcts0 || !led->pwm_duty_cycles->duty_pcts1) { + dev_err(led->cdev.dev, "duty_cycles and duty_pcts is not exist\n"); + return -EINVAL; + } + + start_idx = led->pwm_duty_cycles->start_idx; + idx_len0 = led->pwm_duty_cycles->num_duty_pcts0; + idx_len1 = led->pwm_duty_cycles->num_duty_pcts1; + + if (idx_len0 >= PM_PWM_LUT_SIZE && start_idx) { + printk("Wrong LUT size or index\n"); + return -EINVAL; + } + if ((start_idx + idx_len0) > PM_PWM_LUT_SIZE) { + printk("Exceed LUT limit\n"); + return -EINVAL; + } + if (idx_len1 >= PM_PWM_LUT_SIZE && start_idx) { + printk("Wrong LUT size or index\n"); + return -EINVAL; + } + if ((start_idx + idx_len1) > PM_PWM_LUT_SIZE) { + printk("Exceed LUT limit\n"); + return -EINVAL; + } + + +#else + + if (!led->pwm_duty_cycles || 
!led->pwm_duty_cycles->duty_pcts) { + dev_err(led->cdev.dev, "duty_cycles and duty_pcts is not exist\n"); + return -EINVAL; + } + + if (led->pwm_grppwm > 0 && led->pwm_grpfreq > 0) { + total_ms = led->pwm_grpfreq * 50; + on_ms = (led->pwm_grppwm * total_ms) >> 8; + if (PM8XXX_LED_PWM_FLAGS & PM_PWM_LUT_REVERSE) { + led->pwm_duty_cycles->duty_ms = on_ms / + (led->pwm_duty_cycles->num_duty_pcts << 1); + led->pwm_pause_lo = on_ms % + (led->pwm_duty_cycles->num_duty_pcts << 1); + } else { + led->pwm_duty_cycles->duty_ms = on_ms / + (led->pwm_duty_cycles->num_duty_pcts); + led->pwm_pause_lo = on_ms % + (led->pwm_duty_cycles->num_duty_pcts); + } + led->pwm_pause_hi = total_ms - on_ms; + dev_dbg(led->cdev.dev, "duty_ms %d, pause_hi %d, pause_lo %d, total_ms %d, on_ms %d\n", + led->pwm_duty_cycles->duty_ms, led->pwm_pause_hi, led->pwm_pause_lo, + total_ms, on_ms); + } + + pwm_max = pm8xxx_adjust_brightness(&led->cdev, led->cdev.brightness); + start_idx = led->pwm_duty_cycles->start_idx; + idx_len = led->pwm_duty_cycles->num_duty_pcts; + pcts = led->pwm_duty_cycles->duty_pcts; + + if (led->blink) { + int mid = (idx_len - 1) >> 1; + for (i = 0; i <= mid; i++) { + temp = ((pwm_max * i) << 1) / mid + 1; + pcts[i] = temp >> 1; + pcts[idx_len - 1 - i] = temp >> 1; + } + } else { + for (i = 0; i < idx_len; i++) { + pcts[i] = pwm_max; + } + } + + if (idx_len >= PM_PWM_LUT_SIZE && start_idx) { + pr_err("Wrong LUT size or index\n"); + return -EINVAL; + } + if ((start_idx + idx_len) > PM_PWM_LUT_SIZE) { + pr_err("Exceed LUT limit\n"); + return -EINVAL; + } + + flags = PM8XXX_LED_PWM_FLAGS; + + switch (led->max_current) { + case PM8XXX_PWM_CURRENT_4MA: + flags |= PM_PWM_BANK_LO; + break; + case PM8XXX_PWM_CURRENT_8MA: + flags |= PM_PWM_BANK_HI; + break; + case PM8XXX_PWM_CURRENT_12MA: + flags |= (PM_PWM_BANK_LO | PM_PWM_BANK_HI); + break; + default: + flags |= (PM_PWM_BANK_LO | PM_PWM_BANK_HI); + break; + } +#endif + +#ifdef CONFIG_LGE_PM_PWM_LED + rc = 
pm8xxx_pwm_lut_config(led->pwm_dev, led->pwm_period_us, + led->pwm_duty_cycles->duty_pcts0, + led->pwm_duty_cycles->duty_ms0, + start_idx, idx_len0, 0, 0, + PM8XXX_LED_PWM_FLAGS); +#else + rc = pm8xxx_pwm_lut_config(led->pwm_dev, led->pwm_period_us, + led->pwm_duty_cycles->duty_pcts, + led->pwm_duty_cycles->duty_ms, + start_idx, idx_len, led->pwm_pause_lo, led->pwm_pause_hi, + flags); +#endif + return rc; +} + static int pm8xxx_led_pwm_work(struct pm8xxx_led_data *led) { int duty_us; int rc = 0; - if (likely(keyled_debug_mask & DEBUG_LED_TRACE)) - printk("[leds-pm8xxx] pm8xxx_led_pwm_work\n"); + int level = 0; + + level = pm8xxx_adjust_brightness(&led->cdev, led->cdev.brightness); + if (led->pwm_duty_cycles == NULL) { - duty_us = (led->pwm_period_us * led->cdev.brightness) / - LED_FULL; + printk("pm8xxx_led_pwm_work pwm_duty_cycles is NULL\n"); + duty_us = (led->pwm_period_us * level) / LED_FULL; rc = pwm_config(led->pwm_dev, duty_us, led->pwm_period_us); if (led->cdev.brightness) { led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, @@ -433,26 +598,38 @@ static int pm8xxx_led_pwm_work(struct pm8xxx_led_data *led) led->cdev.brightness); } } else { - if (led->cdev.brightness) - led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, - led->cdev.brightness); - rc = pm8xxx_pwm_lut_enable(led->pwm_dev, led->cdev.brightness); - if (!led->cdev.brightness) - led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, - led->cdev.brightness); + if (level) { +#ifdef CONFIG_LGE_PM_PWM_LED +#else + pm8xxx_led_pwm_pattern_update(led); +#endif + led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, level); + } + + rc = pm8xxx_pwm_lut_enable(led->pwm_dev, level); + if (!level) + { + led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, level); + } + + rc = pm8xxx_pwm_lut_enable(led->pwm_dev, level); + if (!level) + led_rgb_write(led, SSBI_REG_ADDR_RGB_CNTL1, level); } return rc; } static void __pm8xxx_led_work(struct pm8xxx_led_data *led, - enum led_brightness level) + enum led_brightness value) { int rc; + int level = 0; 
mutex_lock(&led->lock); - if (likely(keyled_debug_mask & DEBUG_LED_TRACE)) - printk("[leds-pm8xxx] __pm8xxx_led_work\n"); + + level = pm8xxx_adjust_brightness(&led->cdev, value); + switch (led->id) { case PM8XXX_ID_LED_KB_LIGHT: led_kp_set(led, level); @@ -490,8 +667,11 @@ static void pm8xxx_led_work(struct work_struct *work) struct pm8xxx_led_data *led = container_of(work, struct pm8xxx_led_data, work); - if (likely(keyled_debug_mask & DEBUG_LED_TRACE)) - printk("[leds-pm8xxx] pm8xxx_led_work (id : %d )\n",led->id); + + dev_dbg(led->cdev.dev, "led %s set %d (%s mode)\n", + led->cdev.name, led->cdev.brightness, + (led->pwm_dev ? "pwm" : "manual")); + if (led->pwm_dev == NULL) { __pm8xxx_led_work(led, led->cdev.brightness); } else { @@ -513,9 +693,6 @@ static void pm8xxx_led_set(struct led_classdev *led_cdev, led = container_of(led_cdev, struct pm8xxx_led_data, cdev); - if (likely(keyled_debug_mask & DEBUG_LED_TRACE)) - printk("[leds-pm8xxx] pm8xxx_led_set : %d \n",value); - #ifdef CONFIG_LGE_PM_PWM_LED if (led->id == PM8XXX_ID_LED_2 || led->id == PM8XXX_ID_LED_0) { idx_len0 = led->pwm_duty_cycles->num_duty_pcts0; @@ -538,36 +715,39 @@ static void pm8xxx_led_set(struct led_classdev *led_cdev, } } #endif - if (value < LED_OFF || value > led->cdev.max_brightness) { dev_err(led->cdev.dev, "Invalid brightness value exceeds"); return; } - led->cdev.brightness = value; - schedule_work(&led->work); + if (!led->lock_update) { + schedule_work(&led->work); + } else { + dev_dbg(led->cdev.dev, "set %d pending\n", + value); + } } -static int pm8xxx_set_led_mode_and_max_brightness(struct pm8xxx_led_data *led, +static int pm8xxx_set_led_mode_and_adjust_brightness(struct pm8xxx_led_data *led, enum pm8xxx_led_modes led_mode, int max_current) { switch (led->id) { case PM8XXX_ID_LED_0: case PM8XXX_ID_LED_1: case PM8XXX_ID_LED_2: - led->cdev.max_brightness = max_current / - PM8XXX_ID_LED_CURRENT_FACTOR; - if (led->cdev.max_brightness > MAX_LC_LED_BRIGHTNESS) - led->cdev.max_brightness 
= MAX_LC_LED_BRIGHTNESS; + led->adjust_brightness = max_current / + PM8XXX_ID_LED_CURRENT_FACTOR; + if (led->adjust_brightness > MAX_LC_LED_BRIGHTNESS) + led->adjust_brightness = MAX_LC_LED_BRIGHTNESS; led->reg = led_mode; break; case PM8XXX_ID_LED_KB_LIGHT: case PM8XXX_ID_FLASH_LED_0: case PM8XXX_ID_FLASH_LED_1: - led->cdev.max_brightness = max_current / + led->adjust_brightness = max_current / PM8XXX_ID_FLASH_CURRENT_FACTOR; - if (led->cdev.max_brightness > MAX_FLASH_BRIGHTNESS) - led->cdev.max_brightness = MAX_FLASH_BRIGHTNESS; + if (led->adjust_brightness > MAX_FLASH_BRIGHTNESS) + led->adjust_brightness = MAX_FLASH_BRIGHTNESS; switch (led_mode) { case PM8XXX_LED_MODE_PWM1: @@ -587,12 +767,11 @@ static int pm8xxx_set_led_mode_and_max_brightness(struct pm8xxx_led_data *led, } break; case PM8XXX_ID_WLED: - led->cdev.max_brightness = WLED_MAX_LEVEL; + led->adjust_brightness = WLED_MAX_LEVEL; break; case PM8XXX_ID_RGB_LED_RED: case PM8XXX_ID_RGB_LED_GREEN: case PM8XXX_ID_RGB_LED_BLUE: - led->cdev.max_brightness = LED_FULL; break; default: dev_err(led->cdev.dev, "LED Id is invalid"); @@ -614,12 +793,13 @@ static enum led_brightness pm8xxx_led_get(struct led_classdev *led_cdev) static int __devinit init_wled(struct pm8xxx_led_data *led) { int rc, i; - u8 val, num_wled_strings; + u8 val, num_wled_strings, comp; + u16 temp; num_wled_strings = led->wled_cfg->num_strings; /* program over voltage protection threshold */ - if (led->wled_cfg->ovp_val > WLED_OVP_37V) { + if (led->wled_cfg->ovp_val > WLED_OVP_27V) { dev_err(led->dev->parent, "Invalid ovp value"); return -EINVAL; } @@ -667,6 +847,46 @@ static int __devinit init_wled(struct pm8xxx_led_data *led) return rc; } + if (led->wled_cfg->comp_res_val) { + /* Add Validation for compensation resistor value */ + if (!(led->wled_cfg->comp_res_val >= + WLED_RESISTOR_COMPENSATION_DEFAULT && + led->wled_cfg->comp_res_val <= + WLED_RESISTOR_COMPENSATION_MAX && + led->wled_cfg->comp_res_val % + WLED_RESISTOR_COMPENSATION_DEFAULT 
== 0)) { + dev_err(led->dev->parent, "Invalid Value " \ + "for compensation register.\n"); + return -EINVAL; + } + + /* Compute the compensation resistor register value */ + temp = led->wled_cfg->comp_res_val; + temp = (temp - WLED_RESISTOR_COMPENSATION_DEFAULT) / + WLED_RESISTOR_COMPENSATION_DEFAULT; + comp = (temp << WLED_4_BIT_SHFT); + + rc = pm8xxx_readb(led->dev->parent, + WLED_RESISTOR_COMPENSATION_CFG_REG, &val); + if (rc) { + dev_err(led->dev->parent, "can't read wled " \ + "resistor compensation config register rc=%d\n", rc); + return rc; + } + + val = val && WLED_4_BIT_MASK; + val = val | comp; + + /* program compenstation resistor register */ + rc = pm8xxx_writeb(led->dev->parent, + WLED_RESISTOR_COMPENSATION_CFG_REG, val); + if (rc) { + dev_err(led->dev->parent, "can't write wled " \ + "resistor compensation config register rc=%d\n", rc); + return rc; + } + } + /* program high pole capacitance */ if (led->wled_cfg->cp_select > WLED_CP_SELECT_MAX) { dev_err(led->dev->parent, "Invalid pole capacitance"); @@ -832,11 +1052,7 @@ static int __devinit get_init_value(struct pm8xxx_led_data *led, u8 *val) static int pm8xxx_led_pwm_configure(struct pm8xxx_led_data *led) { -#ifdef CONFIG_LGE_PM_PWM_LED - int start_idx, idx_len0, idx_len1, duty_us, rc; -#else - int start_idx, idx_len, duty_us, rc; -#endif + int duty_us, rc; led->pwm_dev = pwm_request(led->pwm_channel, led->cdev.name); @@ -850,60 +1066,313 @@ static int pm8xxx_led_pwm_configure(struct pm8xxx_led_data *led) } if (led->pwm_duty_cycles != NULL) { - start_idx = led->pwm_duty_cycles->start_idx; + rc = pm8xxx_led_pwm_pattern_update(led); + } else { + duty_us = led->pwm_period_us; + rc = pwm_config(led->pwm_dev, duty_us, led->pwm_period_us); + } + + return rc; +} + +static ssize_t pm8xxx_led_max_current_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct pm8xxx_led_data *led; + ssize_t rc = -EINVAL; + 
char *after; + unsigned long state = simple_strtoul(buf, &after, 10); + size_t count = after - buf; + + led = container_of(led_cdev, struct pm8xxx_led_data, cdev); + + if (isspace(*after)) + count++; + + if (count == size) { + rc = count; + if(state > 20) + state = 20; + led->max_current = state; + led_lc_set(led, state/2); + printk(KERN_INFO "%s change max_current %lu\n", led_cdev->name, state/2); + } + return rc; +} + +static DEVICE_ATTR(max_current, 0200, NULL, pm8xxx_led_max_current_store); + +static ssize_t pm8xxx_led_adjust_brightness_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct pm8xxx_led_data *led; + + ssize_t rc = -EINVAL; + char *after; + unsigned long state = simple_strtoul(buf, &after, 10); + size_t count = after - buf; + + led = container_of(led_cdev, struct pm8xxx_led_data, cdev); + + if (isspace(*after)) + count++; + + if (count == size) { + rc = count; + if(state > 100) + state = 100; + led->adjust_brightness = state; + printk(KERN_INFO "%s change adjust_brightness %lu\n", led_cdev->name, state); + } + return rc; +} + +static DEVICE_ATTR(adjust_brightness, 0200, NULL, pm8xxx_led_adjust_brightness_store); + +static ssize_t pm8xxx_led_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); #ifdef CONFIG_LGE_PM_PWM_LED - idx_len0 = led->pwm_duty_cycles->num_duty_pcts0; - idx_len1 = led->pwm_duty_cycles->num_duty_pcts1; + int start_idx = 0, idx_len0 = 0, idx_len1 = 0; #else - idx_len = led->pwm_duty_cycles->num_duty_pcts; + int start_idx = 0, idx_len = 0; #endif + int i, j, n = 0; + for (i = 0; i < pdata->num_configs; i++) + { + n += sprintf(&buf[n], "[%d] %s blink %d, adjust %d, max_current %d\n", + i, leds[i].cdev.name, leds[i].blink, leds[i].adjust_brightness, 
leds[i].max_current); #ifdef CONFIG_LGE_PM_PWM_LED - if (idx_len0 >= PM_PWM_LUT_SIZE && start_idx) { - printk("Wrong LUT size or index\n"); - return -EINVAL; - } - if ((start_idx + idx_len0) > PM_PWM_LUT_SIZE) { - printk("Exceed LUT limit\n"); - return -EINVAL; + if (leds[i].pwm_duty_cycles != NULL && leds[i].pwm_duty_cycles->duty_pcts0 != NULL && leds[i].pwm_duty_cycles->duty_pcts1) { + start_idx = leds[i].pwm_duty_cycles->start_idx; + idx_len0 = leds[i].pwm_duty_cycles->num_duty_pcts0; + idx_len1 = leds[i].pwm_duty_cycles->num_duty_pcts1; + for (j = 0; j < idx_len0; j++) + { + n += sprintf(&buf[n], "%d ", + leds[i].pwm_duty_cycles->duty_pcts0[j]); + } + n += sprintf(&buf[n], "\n"); + for (j = 0; j < idx_len1; j++) + { + n += sprintf(&buf[n], "%d ", + leds[i].pwm_duty_cycles->duty_pcts1[j]); + } + n += sprintf(&buf[n], "\n"); } - if (idx_len1 >= PM_PWM_LUT_SIZE && start_idx) { - printk("Wrong LUT size or index\n"); - return -EINVAL; +#else + if (leds[i].pwm_duty_cycles != NULL && leds[i].pwm_duty_cycles->duty_pcts != NULL) { + start_idx = leds[i].pwm_duty_cycles->start_idx; + idx_len = leds[i].pwm_duty_cycles->num_duty_pcts; + for (j = 0; j < idx_len; j++) + { + n += sprintf(&buf[n], "%d ", + leds[i].pwm_duty_cycles->duty_pcts[j]); + } + n += sprintf(&buf[n], "\n"); } - if ((start_idx + idx_len1) > PM_PWM_LUT_SIZE) { - printk("Exceed LUT limit\n"); - return -EINVAL; +#endif + else + n += sprintf(&buf[n], "not exist\n"); + + } + return n; +} + +static DEVICE_ATTR(status, 0444, pm8xxx_led_status_show, NULL); + +static ssize_t pm8xxx_led_lock_update_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + int i, n = 0; + + for (i = 0; i < pdata->num_configs; i++) + { + n += sprintf(&buf[n], "%s is %s\n", + leds[i].cdev.name, + (leds[i].lock_update ? 
"non-updatable" : "updatable")); + } + + return n; +} + +static ssize_t pm8xxx_led_lock_update_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + ssize_t rc = -EINVAL; + char *after; + unsigned long state = simple_strtoul(buf, &after, 10); + size_t count = after - buf; + int i; + + if (isspace(*after)) + count++; + + if (count == size) { + rc = count; + for (i = 0; i < pdata->num_configs; i++) + { + leds[i].lock_update = state; + if (!state) { + dev_info(dev, "resume %s set %d\n", + leds[i].cdev.name, leds[i].cdev.brightness); + schedule_work(&leds[i].work); + } } - rc = pm8xxx_pwm_lut_config(led->pwm_dev, led->pwm_period_us, - led->pwm_duty_cycles->duty_pcts0, - led->pwm_duty_cycles->duty_ms0, - start_idx, idx_len0, 0, 0, - PM8XXX_LED_PWM_FLAGS); -#else - if (idx_len >= PM_PWM_LUT_SIZE && start_idx) { - pr_err("Wrong LUT size or index\n"); - return -EINVAL; + } + return rc; +} + +static DEVICE_ATTR(lock, 0644, pm8xxx_led_lock_update_show, pm8xxx_led_lock_update_store); + +static ssize_t pm8xxx_led_grppwm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + int i, n = 0; + + for (i = 0; i < pdata->num_configs; i++) + { + n += sprintf(&buf[n], "%s period_us is %d\n", leds[i].cdev.name, leds[i].pwm_grppwm); + } + return n; +} + +static ssize_t pm8xxx_led_grppwm_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + ssize_t rc = -EINVAL; + char *after; + unsigned long state = simple_strtoul(buf, &after, 10); + size_t count = after 
- buf; + int i; + + if (isspace(*after)) + count++; + + if (count == size) { + rc = count; + if (state < 0) + state = 0; + if (state > PM8XXX_LED_PWM_GRPPWM_MAX) + state = PM8XXX_LED_PWM_GRPPWM_MAX; + + for (i = 0; i < pdata->num_configs; i++) + { + leds[i].pwm_grppwm = state; + dev_dbg(leds[i].cdev.dev, "set grppwm %lu\n", state); } - if ((start_idx + idx_len) > PM_PWM_LUT_SIZE) { - pr_err("Exceed LUT limit\n"); - return -EINVAL; + } + return rc; +} + +static DEVICE_ATTR(grppwm, 0644, pm8xxx_led_grppwm_show, pm8xxx_led_grppwm_store); + +static ssize_t pm8xxx_led_grpfreq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + int i, n = 0; + + for (i = 0; i < pdata->num_configs; i++) + { + n += sprintf(&buf[n], "%s freq %d\n", leds[i].cdev.name, leds[i].pwm_grpfreq); + } + return n; +} + +static ssize_t pm8xxx_led_grpfreq_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + ssize_t rc = -EINVAL; + char *after; + unsigned long state = simple_strtoul(buf, &after, 10); + size_t count = after - buf; + int i; + + if (isspace(*after)) + count++; + + if (count == size) { + rc = count; + + if(state < 0) + state = 0; + if(state > PM8XXX_LED_PWM_GRPFREQ_MAX) + state = PM8XXX_LED_PWM_GRPFREQ_MAX; + + for (i = 0; i < pdata->num_configs; i++) + { + leds[i].pwm_grpfreq = state; + dev_dbg(leds[i].cdev.dev, "set grpfreq %lu\n", state); } + } + return rc; +} - rc = pm8xxx_pwm_lut_config(led->pwm_dev, led->pwm_period_us, - led->pwm_duty_cycles->duty_pcts, - led->pwm_duty_cycles->duty_ms, - start_idx, idx_len, 0, 0, - PM8XXX_LED_PWM_FLAGS); -#endif - } else { - duty_us = led->pwm_period_us; - rc = pwm_config(led->pwm_dev, duty_us, 
led->pwm_period_us); +static DEVICE_ATTR(grpfreq, 0644, pm8xxx_led_grpfreq_show, pm8xxx_led_grpfreq_store); + +static ssize_t pm8xxx_led_blink_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + int i, blink = 0; + + for (i = 0; i < pdata->num_configs; i++) + { + if (leds[i].blink) + blink |= 1 << i; } + return sprintf(buf, "%d", blink); +} +static ssize_t pm8xxx_led_blink_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + const struct pm8xxx_led_platform_data *pdata = dev->platform_data; + struct pm8xxx_led_data *leds = (struct pm8xxx_led_data *)dev_get_drvdata(dev); + ssize_t rc = -EINVAL; + char *after; + unsigned long state = simple_strtoul(buf, &after, 10); + size_t count = after - buf; + int i; + + if (isspace(*after)) + count++; + + if (count == size) { + rc = count; + + for (i = 0; i < pdata->num_configs; i++) + { + if (leds[i].blink != state) { + leds[i].blink = state; + if(!leds[i].lock_update) + rc = pm8xxx_led_pwm_pattern_update(&leds[i]); + } + } + } return rc; } +static DEVICE_ATTR(blink, 0644, pm8xxx_led_blink_show, pm8xxx_led_blink_store); + static int __devinit pm8xxx_led_probe(struct platform_device *pdev) { @@ -916,8 +1385,6 @@ static int __devinit pm8xxx_led_probe(struct platform_device *pdev) bool found = false; int rc, i, j; - printk("[leds_pm8xxx] pm8xxx_led_probe start\n"); - if (pdata == NULL) { dev_err(&pdev->dev, "platform data not supplied\n"); return -EINVAL; @@ -948,6 +1415,8 @@ static int __devinit pm8xxx_led_probe(struct platform_device *pdev) led_dat->pwm_duty_cycles = led_cfg->pwm_duty_cycles; led_dat->wled_cfg = led_cfg->wled_cfg; led_dat->max_current = led_cfg->max_current; + led_dat->lock_update = 0; + led_dat->blink = 0; if (!((led_dat->id >= PM8XXX_ID_LED_KB_LIGHT) && (led_dat->id < PM8XXX_ID_MAX))) { @@ -979,6 
+1448,7 @@ static int __devinit pm8xxx_led_probe(struct platform_device *pdev) led_dat->cdev.default_trigger = curr_led->default_trigger; led_dat->cdev.brightness_set = pm8xxx_led_set; led_dat->cdev.brightness_get = pm8xxx_led_get; + led_dat->cdev.max_brightness = LED_FULL; led_dat->cdev.brightness = LED_OFF; led_dat->cdev.flags = curr_led->flags; led_dat->dev = &pdev->dev; @@ -987,7 +1457,7 @@ static int __devinit pm8xxx_led_probe(struct platform_device *pdev) if (rc < 0) goto fail_id_check; - rc = pm8xxx_set_led_mode_and_max_brightness(led_dat, + rc = pm8xxx_set_led_mode_and_adjust_brightness(led_dat, led_cfg->mode, led_cfg->max_current); if (rc < 0) goto fail_id_check; @@ -1002,11 +1472,16 @@ static int __devinit pm8xxx_led_probe(struct platform_device *pdev) goto fail_id_check; } + //rc = device_create_file(&pdev->dev, &dev_attr_max_current); + //rc = device_create_file(&pdev->dev, &dev_attr_adjust_brightness); + rc = device_create_file(led_dat->cdev.dev, &dev_attr_max_current); + rc = device_create_file(led_dat->cdev.dev, &dev_attr_adjust_brightness); + /* configure default state */ if (led_cfg->default_state) - led->cdev.brightness = led_dat->cdev.max_brightness; + led_dat->cdev.brightness = led_dat->cdev.max_brightness; else - led->cdev.brightness = LED_OFF; + led_dat->cdev.brightness = LED_OFF; if (led_cfg->mode != PM8XXX_LED_MODE_MANUAL) { if (led_dat->id == PM8XXX_ID_RGB_LED_RED || @@ -1018,22 +1493,56 @@ static int __devinit pm8xxx_led_probe(struct platform_device *pdev) led_dat->cdev.max_brightness); if (led_dat->pwm_channel != -1) { - led_dat->cdev.max_brightness = LED_FULL; + if (led_cfg->pwm_adjust_brightness) { + led_dat->adjust_brightness = led_cfg->pwm_adjust_brightness; + } else { + led_dat->adjust_brightness = 100; + } + rc = pm8xxx_led_pwm_configure(led_dat); if (rc) { dev_err(&pdev->dev, "failed to " "configure LED, error: %d\n", rc); goto fail_id_check; } - schedule_work(&led->work); + schedule_work(&led->work); } } else { - 
__pm8xxx_led_work(led_dat, led->cdev.brightness); + __pm8xxx_led_work(led_dat, led_dat->cdev.brightness); } } + led->use_pwm = pdata->use_pwm; + platform_set_drvdata(pdev, led); - printk("[leds_pm8xxx] pm8xxx_led_probe end\n"); + + rc = device_create_file(&pdev->dev, &dev_attr_lock); + if (rc) { + device_remove_file(&pdev->dev, &dev_attr_lock); + dev_err(&pdev->dev, "failed device_create_file(lock)\n"); + } + + rc = device_create_file(&pdev->dev, &dev_attr_status); + + if (led->use_pwm) { + rc = device_create_file(&pdev->dev, &dev_attr_blink); + if (rc) { + device_remove_file(&pdev->dev, &dev_attr_blink); + dev_err(&pdev->dev, "failed device_create_file(blink)\n"); + } + + rc = device_create_file(&pdev->dev, &dev_attr_grppwm); + if (rc) { + device_remove_file(&pdev->dev, &dev_attr_grppwm); + dev_err(&pdev->dev, "failed device_create_fiaild(grppwm)\n"); + } + + rc = device_create_file(&pdev->dev, &dev_attr_grpfreq); + if (rc) { + device_remove_file(&pdev->dev, &dev_attr_grpfreq); + dev_err(&pdev->dev, "failed device_create_file(grpfreq)\n"); + } + } return 0; @@ -1061,8 +1570,17 @@ static int __devexit pm8xxx_led_remove(struct platform_device *pdev) cancel_work_sync(&led[i].work); mutex_destroy(&led[i].lock); led_classdev_unregister(&led[i].cdev); - if (led[i].pwm_dev != NULL) + if (led[i].pwm_dev != NULL) { pwm_free(led[i].pwm_dev); + } + } + + device_remove_file(&pdev->dev, &dev_attr_lock); + + if (led->use_pwm) { + device_remove_file(&pdev->dev, &dev_attr_blink); + device_remove_file(&pdev->dev, &dev_attr_grppwm); + device_remove_file(&pdev->dev, &dev_attr_grpfreq); } kfree(led); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 17b918d3d6b..c276ad09ace 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3433,6 +3433,8 @@ static void handle_stripe(struct stripe_head *sh) set_bit(R5_Wantwrite, &dev->flags); if (prexor) continue; + if (s.failed > 1) + continue; if (!test_bit(R5_Insync, &dev->flags) || ((i == sh->pd_idx || i == sh->qd_idx) && 
s.failed == 0)) diff --git a/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.c b/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.c index 0b83993a105..267efaae9af 100644 --- a/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.c +++ b/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.c @@ -29,6 +29,9 @@ extern uint8_t imx111_afcalib_data[4]; #define ACTUATOR_MIN_MOVE_RANGE 200 // TBD #endif +struct region_params_t tmp_region_params[MAX_ACTUATOR_REGION]; +unsigned char frun = 0; + static struct msm_actuator_ctrl_t msm_actuator_t; static struct msm_actuator msm_vcm_actuator_table; static struct msm_actuator msm_piezo_actuator_table; @@ -75,6 +78,11 @@ static int32_t msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl, int32_t rc = 0; struct msm_camera_i2c_reg_tbl *i2c_tbl = a_ctrl->i2c_reg_tbl; CDBG("%s: IN\n", __func__); + + uint8_t hw_reg_write = 1; + if (a_ctrl->curr_hwparams == hw_params) + hw_reg_write = 0; + for (i = 0; i < size; i++) { if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC) { value = (next_lens_position << @@ -104,19 +112,25 @@ static int32_t msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl, i2c_byte1 = (value & 0xFF00) >> 8; i2c_byte2 = value & 0xFF; } + i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1; + i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2; + i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay; + a_ctrl->i2c_tbl_index++; } else { - i2c_byte1 = write_arr[i].reg_addr; - i2c_byte2 = (hw_dword & write_arr[i].hw_mask) >> - write_arr[i].hw_shift; + if (hw_reg_write) { + i2c_byte1 = write_arr[i].reg_addr; + i2c_byte2 = (hw_dword & write_arr[i].hw_mask) >> + write_arr[i].hw_shift; + i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1; + i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2; + i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay; + a_ctrl->i2c_tbl_index++; + } } - CDBG("%s: i2c_byte1:0x%x, i2c_byte2:0x%x\n", __func__, - i2c_byte1, i2c_byte2); - 
i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1; - i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2; - i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay; - a_ctrl->i2c_tbl_index++; } CDBG("%s: OUT\n", __func__); + if (rc == 0) + a_ctrl->curr_hwparams = hw_params; return rc; } @@ -389,6 +403,13 @@ static int32_t msm_actuator_init_default_step_table(struct msm_actuator_ctrl_t * CDBG("%s called\n", __func__); + if(!frun){ + memcpy(&tmp_region_params,&a_ctrl->region_params,sizeof(tmp_region_params)); + frun++; + }else{ + memcpy(&a_ctrl->region_params,&tmp_region_params,sizeof(tmp_region_params)); + } + for (; data_size > 0; data_size--) max_code_size *= 2; @@ -664,6 +685,7 @@ static int32_t msm_actuator_set_default_focus( static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl) { int32_t rc = 0; +#ifdef CONFIG_SEKONIX_LENS_ACT int cur_pos = a_ctrl->curr_step_pos; struct msm_actuator_move_params_t move_params; @@ -674,7 +696,7 @@ static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl) a_ctrl, &move_params); msleep(300); } - +#endif if (a_ctrl->vcm_enable) { rc = gpio_direction_output(a_ctrl->vcm_pwd, 0); if (!rc) diff --git a/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.h b/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.h index 5c050d127fc..ca346ff22a2 100644 --- a/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.h +++ b/drivers/media/platform/msm/camera_v1/actuators/msm_actuator.h @@ -89,6 +89,7 @@ struct msm_actuator_ctrl_t { uint16_t initial_code; struct msm_camera_i2c_reg_tbl *i2c_reg_tbl; uint16_t i2c_tbl_index; + uint32_t curr_hwparams; }; #ifdef CONFIG_MSM_ACTUATOR diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index e9afafb9f07..8446bf11fea 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -494,8 +494,6 @@ msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq) if (mrq->data) mrq->data->bytes_xfered = 
host->curr.data_xfered; - if (mrq->cmd->error == -ETIMEDOUT) - mdelay(5); msmsdcc_reset_dpsm(host); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 33a17609dd0..b2b715fdccd 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -293,6 +293,18 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, atomic_add(buffers_added, &(pool->available)); } +/* + * The final 8 bytes of the buffer list is a counter of frames dropped + * because there was not a buffer in the buffer list capable of holding + * the frame. + */ +static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter) +{ + __be64 *p = adapter->buffer_list_addr + 4096 - 8; + + adapter->rx_no_buffer = be64_to_cpup(p); +} + /* replenish routine */ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) { @@ -308,8 +320,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) ibmveth_replenish_buffer_pool(adapter, pool); } - adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) + - 4096 - 8); + ibmveth_update_rx_no_buffer(adapter); } /* empty and free ana buffer pool - also used to do cleanup in error paths */ @@ -692,8 +703,7 @@ static int ibmveth_close(struct net_device *netdev) free_irq(netdev->irq, netdev); - adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + - 4096 - 8); + ibmveth_update_rx_no_buffer(adapter); ibmveth_cleanup(adapter); diff --git a/drivers/net/wireless/bcmdhd/dhd_sdio.c b/drivers/net/wireless/bcmdhd/dhd_sdio.c index 0ec95d265f3..bf96cfe7f67 100644 --- a/drivers/net/wireless/bcmdhd/dhd_sdio.c +++ b/drivers/net/wireless/bcmdhd/dhd_sdio.c @@ -6865,7 +6865,9 @@ dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag) #if defined(OOB_INTR_ONLY) /* Clean up any pending IRQ */ + dhd_enable_oob_intr(bus, FALSE); bcmsdh_set_irq(FALSE); + bcmsdh_unregister_oob_intr(); #endif /* defined(OOB_INTR_ONLY) */ /* Clean tx/rx buffer pointers, detach 
from the dongle */ diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c index 886f5650444..6a271e91e88 100644 --- a/drivers/staging/et131x/et131x.c +++ b/drivers/staging/et131x/et131x.c @@ -1478,22 +1478,16 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) * * Return 0 on success, errno on failure (as defined in errno.h) */ -static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) +static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, + u16 value) { struct mac_regs __iomem *mac = &adapter->regs->mac; - struct phy_device *phydev = adapter->phydev; int status = 0; - u8 addr; u32 delay = 0; u32 mii_addr; u32 mii_cmd; u32 mii_indicator; - if (!phydev) - return -EIO; - - addr = phydev->addr; - /* Save a local copy of the registers we are dealing with so we can * set them back */ @@ -1550,6 +1544,7 @@ static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, { u16 reg; u16 mask = 0x0001 << bitnum; + struct phy_device *phydev = adapter->phydev; /* Read the requested register */ et131x_mii_read(adapter, regnum, ®); @@ -1560,11 +1555,11 @@ static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, break; case TRUEPHY_BIT_SET: - et131x_mii_write(adapter, regnum, reg | mask); + et131x_mii_write(adapter, phydev->addr, regnum, reg | mask); break; case TRUEPHY_BIT_CLEAR: - et131x_mii_write(adapter, regnum, reg & ~mask); + et131x_mii_write(adapter, phydev->addr, regnum, reg & ~mask); break; default: @@ -1715,17 +1710,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 val struct net_device *netdev = bus->priv; struct et131x_adapter *adapter = netdev_priv(netdev); - return et131x_mii_write(adapter, reg, value); -} - -static int et131x_mdio_reset(struct mii_bus *bus) -{ - struct net_device *netdev = bus->priv; - struct et131x_adapter *adapter = netdev_priv(netdev); - - et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); - - return 
0; + return et131x_mii_write(adapter, phy_addr, reg, value); } /** @@ -1741,12 +1726,13 @@ static int et131x_mdio_reset(struct mii_bus *bus) static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down) { u16 data; + struct phy_device *phydev = adapter->phydev; et131x_mii_read(adapter, MII_BMCR, &data); data &= ~BMCR_PDOWN; if (down) data |= BMCR_PDOWN; - et131x_mii_write(adapter, MII_BMCR, data); + et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); } /** @@ -1759,6 +1745,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) u16 imr; u16 isr; u16 lcr2; + struct phy_device *phydev = adapter->phydev; et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr); et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr); @@ -1770,7 +1757,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) ET_PHY_INT_MASK_LINKSTAT & ET_PHY_INT_MASK_ENABLE); - et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr); + et131x_mii_write(adapter, phydev->addr, PHY_INTERRUPT_MASK, imr); /* Set the LED behavior such that LED 1 indicates speed (off = * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates @@ -1791,7 +1778,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) else lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); - et131x_mii_write(adapter, PHY_LED_2, lcr2); + et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); } } @@ -4202,14 +4189,14 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18 | 0x4); - et131x_mii_write(adapter, PHY_INDEX_REG, + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, register18 | 0x8402); - et131x_mii_write(adapter, PHY_DATA_REG, + et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, register18 | 511); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18); + et131x_mii_write(adapter, 
phydev->addr, + PHY_MPHY_CONTROL_REG, register18); } et1310_config_flow_control(adapter); @@ -4221,7 +4208,8 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_CONFIG, ®); reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; - et131x_mii_write(adapter, PHY_CONFIG, reg); + et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, + reg); } et131x_set_rx_dma_timer(adapter); @@ -4254,14 +4242,17 @@ static void et131x_adjust_link(struct net_device *netdev) et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, ®ister18); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18 | 0x4); - et131x_mii_write(adapter, PHY_INDEX_REG, - register18 | 0x8402); - et131x_mii_write(adapter, PHY_DATA_REG, - register18 | 511); - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, - register18); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, + register18 | 0x4); + et131x_mii_write(adapter, phydev->addr, + PHY_INDEX_REG, + register18 | 0x8402); + et131x_mii_write(adapter, phydev->addr, + PHY_DATA_REG, register18 | 511); + et131x_mii_write(adapter, phydev->addr, + PHY_MPHY_CONTROL_REG, + register18); } /* Free the packets being actively sent & stopped */ @@ -5343,10 +5334,6 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev, /* Copy address into the net_device struct */ memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); - /* Init variable for counting how long we do not have link status */ - adapter->boot_coma = 0; - et1310_disable_phy_coma(adapter); - rc = -ENOMEM; /* Setup the mii_bus struct */ @@ -5362,7 +5349,6 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev, adapter->mii_bus->priv = netdev; adapter->mii_bus->read = et131x_mdio_read; adapter->mii_bus->write = et131x_mdio_write; - adapter->mii_bus->reset = et131x_mdio_reset; adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (!adapter->mii_bus->irq) { dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); @@ -5387,6 
+5373,10 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev, /* Setup et1310 as per the documentation */ et131x_adapter_setup(adapter); + /* Init variable for counting how long we do not have link status */ + adapter->boot_coma = 0; + et1310_disable_phy_coma(adapter); + /* We can enable interrupts now * * NOTE - Because registration of interrupt handler is done in the diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index c48cf891e6b..d53f3966804 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1080,12 +1080,12 @@ pci_omegapci_setup(struct serial_private *priv, static int pci_brcm_trumanage_setup(struct serial_private *priv, const struct pciserial_board *board, - struct uart_8250_port *port, int idx) + struct uart_port *port, int idx) { int ret = pci_default_setup(priv, board, port, idx); - port->port.type = PORT_BRCM_TRUMANAGE; - port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); + port->type = PORT_BRCM_TRUMANAGE; + port->flags = (port->flags | UPF_FIXED_PORT | UPF_FIXED_TYPE); return ret; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 33cff0153b2..8882d654b0d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -98,6 +98,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) /* AMD PLL quirk */ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) xhci->quirks |= XHCI_AMD_PLL_FIX; + + if (pdev->vendor == PCI_VENDOR_ID_AMD) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) { xhci->quirks |= XHCI_EP_LIMIT_QUIRK; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 6f1b36d1541..95e8648ade4 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2528,7 +2528,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, * last TRB 
of the previous TD. The command completion handle * will take care the rest. */ - if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { + if (!event_seg && (trb_comp_code == COMP_STOP || + trb_comp_code == COMP_STOP_INVAL)) { ret = 0; goto cleanup; } diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 7af163da9a7..f1bfd0131e0 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3248,6 +3248,7 @@ static const struct usb_device_id sisusb_table[] = { { USB_DEVICE(0x0711, 0x0918) }, { USB_DEVICE(0x0711, 0x0920) }, { USB_DEVICE(0x0711, 0x0950) }, + { USB_DEVICE(0x0711, 0x5200) }, { USB_DEVICE(0x182d, 0x021c) }, { USB_DEVICE(0x182d, 0x0269) }, { } diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c index 366b205f746..b5018bc4362 100755 --- a/drivers/usb/otg/msm_otg.c +++ b/drivers/usb/otg/msm_otg.c @@ -2121,6 +2121,8 @@ static void msm_chg_block_off(struct msm_otg *motg) /* Clear alt interrupt latch and enable bits */ ulpi_write(phy, 0x1F, 0x92); ulpi_write(phy, 0x1F, 0x95); + /* re-enable DP and DM pull down resistors */ + ulpi_write(phy, 0x6, 0xB); break; default: break; @@ -2765,6 +2767,9 @@ static void msm_otg_sm_work(struct work_struct *w) if (TA_WAIT_BCON > 0) msm_otg_start_timer(motg, TA_WAIT_BCON, A_WAIT_BCON); + + /* Clear BSV in host mode */ + clear_bit(B_SESS_VLD, &motg->inputs); msm_otg_start_host(otg, 1); msm_chg_enable_aca_det(motg); msm_chg_disable_aca_intr(motg); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 89b5664aa53..121a05207d8 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -162,6 +162,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, { 
USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, @@ -956,6 +957,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, + /* ekey Devices */ + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 106cc16cc6e..0eb2e97bcb4 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -42,6 +42,8 @@ /* www.candapter.com Ewert Energy Systems CANdapter device */ #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ +#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */ + /* * Texas Instruments XDS100v2 JTAG / BeagleBone A3 * http://processors.wiki.ti.com/index.php/XDS100 @@ -1369,3 +1371,8 @@ #define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */ #define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */ #define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */ + +/* + * ekey biometric systems GmbH (http://ekey.net/) + */ +#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e1e05bad2be..703ebe7eaa9 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -499,6 +499,10 @@ static void option_instat_callback(struct urb *urb); #define INOVIA_VENDOR_ID 0x20a6 #define INOVIA_SEW858 0x1105 +/* VIA Telecom */ +#define VIATELECOM_VENDOR_ID 0x15eb +#define VIATELECOM_PRODUCT_CDS7 0x0001 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -1744,6 +1748,7 @@ static const struct usb_device_id option_ids[] = { 
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, + { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 9d768ac5a60..3e450b4e9c5 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -51,6 +51,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 42bc082896a..71fd9da1d6e 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -22,6 +22,7 @@ #define PL2303_PRODUCT_ID_GPRS 0x0609 #define PL2303_PRODUCT_ID_HCR331 0x331a #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 +#define PL2303_PRODUCT_ID_ZTEK 0xe1f1 #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 9a145e4b816..a08230ebe9c 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -794,29 +794,37 @@ int usb_serial_probe(struct usb_interface *interface, if (usb_endpoint_is_bulk_in(endpoint)) { /* we found a bulk in endpoint */ dbg("found bulk in on endpoint %d", i); - bulk_in_endpoint[num_bulk_in] = endpoint; - ++num_bulk_in; + if (num_bulk_in < MAX_NUM_PORTS) { + bulk_in_endpoint[num_bulk_in] = endpoint; + ++num_bulk_in; + } } if 
(usb_endpoint_is_bulk_out(endpoint)) { /* we found a bulk out endpoint */ dbg("found bulk out on endpoint %d", i); - bulk_out_endpoint[num_bulk_out] = endpoint; - ++num_bulk_out; + if (num_bulk_out < MAX_NUM_PORTS) { + bulk_out_endpoint[num_bulk_out] = endpoint; + ++num_bulk_out; + } } if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dbg("found interrupt in on endpoint %d", i); - interrupt_in_endpoint[num_interrupt_in] = endpoint; - ++num_interrupt_in; + if (num_interrupt_in < MAX_NUM_PORTS) { + interrupt_in_endpoint[num_interrupt_in] = endpoint; + ++num_interrupt_in; + } } if (usb_endpoint_is_int_out(endpoint)) { /* we found an interrupt out endpoint */ dbg("found interrupt out on endpoint %d", i); - interrupt_out_endpoint[num_interrupt_out] = endpoint; - ++num_interrupt_out; + if (num_interrupt_out < MAX_NUM_PORTS) { + interrupt_out_endpoint[num_interrupt_out] = endpoint; + ++num_interrupt_out; + } } } @@ -839,8 +847,10 @@ int usb_serial_probe(struct usb_interface *interface, if (usb_endpoint_is_int_in(endpoint)) { /* we found a interrupt in endpoint */ dbg("found interrupt in for Prolific device on separate interface"); - interrupt_in_endpoint[num_interrupt_in] = endpoint; - ++num_interrupt_in; + if (num_interrupt_in < MAX_NUM_PORTS) { + interrupt_in_endpoint[num_interrupt_in] = endpoint; + ++num_interrupt_in; + } } } } @@ -879,6 +889,11 @@ int usb_serial_probe(struct usb_interface *interface, num_ports = type->num_ports; } + if (num_ports > MAX_NUM_PORTS) { + dev_warn(&interface->dev, "too many ports requested: %d\n", num_ports); + num_ports = MAX_NUM_PORTS; + } + serial->num_ports = num_ports; serial->num_bulk_in = num_bulk_in; serial->num_bulk_out = num_bulk_out; diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 0d06d7ca86f..bf7014d49a5 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -953,6 +953,10 @@ static void command_port_read_callback(struct urb *urb) 
dbg("%s - command_info is NULL, exiting.", __func__); return; } + if (!urb->actual_length) { + dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__); + return; + } if (status) { dbg("%s - nonzero urb status: %d", __func__, status); if (status != -ENOENT) @@ -974,7 +978,8 @@ static void command_port_read_callback(struct urb *urb) /* These are unsolicited reports from the firmware, hence no waiting command to wakeup */ dbg("%s - event received", __func__); - } else if (data[0] == WHITEHEAT_GET_DTR_RTS) { + } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) && + (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) { memcpy(command_info->result_buffer, &data[1], urb->actual_length - 1); command_info->command_finished = WHITEHEAT_CMD_COMPLETE; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 43944c6d7b4..8d6ac6bec9e 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1653,6 +1653,12 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry, target_dentry, toName); } + /* force revalidate to go get info when needed */ + CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0; + + source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime = + target_dir->i_mtime = current_fs_time(source_dir->i_sb); + cifs_rename_exit: kfree(info_buf_source); kfree(fromName); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 740cad8dcd8..6a7c4837cc7 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -614,6 +614,8 @@ static int ext2_get_blocks(struct inode *inode, int count = 0; ext2_fsblk_t first_block = 0; + BUG_ON(maxblocks == 0); + depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary); if (depth == 0) diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c index 1c3312858fc..e98171a11cf 100644 --- a/fs/ext2/xip.c +++ b/fs/ext2/xip.c @@ -35,6 +35,7 @@ __ext2_get_block(struct inode *inode, pgoff_t pgoff, int create, int rc; memset(&tmp, 0, sizeof(struct buffer_head)); + tmp.b_size = 1 << inode->i_blkbits; rc = ext2_get_block(inode, 
pgoff, &tmp, create); *result = tmp.b_blocknr; diff --git a/fs/file.c b/fs/file.c index dcbc172ccca..349a26e32d6 100644 --- a/fs/file.c +++ b/fs/file.c @@ -421,7 +421,7 @@ struct files_struct init_files = { .close_on_exec = init_files.close_on_exec_init, .open_fds = init_files.open_fds_init, }, - .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock), + .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock), }; /* diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index e92a342f14e..e379b870cec 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -68,7 +68,7 @@ static void isofs_put_super(struct super_block *sb) return; } -static int isofs_read_inode(struct inode *); +static int isofs_read_inode(struct inode *, int relocated); static int isofs_statfs (struct dentry *, struct kstatfs *); static struct kmem_cache *isofs_inode_cachep; @@ -1264,7 +1264,7 @@ static int isofs_read_level3_size(struct inode *inode) goto out; } -static int isofs_read_inode(struct inode *inode) +static int isofs_read_inode(struct inode *inode, int relocated) { struct super_block *sb = inode->i_sb; struct isofs_sb_info *sbi = ISOFS_SB(sb); @@ -1409,7 +1409,7 @@ static int isofs_read_inode(struct inode *inode) */ if (!high_sierra) { - parse_rock_ridge_inode(de, inode); + parse_rock_ridge_inode(de, inode, relocated); /* if we want uid/gid set, override the rock ridge setting */ if (sbi->s_uid_set) inode->i_uid = sbi->s_uid; @@ -1488,9 +1488,10 @@ static int isofs_iget5_set(struct inode *ino, void *data) * offset that point to the underlying meta-data for the inode. 
The * code below is otherwise similar to the iget() code in * include/linux/fs.h */ -struct inode *isofs_iget(struct super_block *sb, - unsigned long block, - unsigned long offset) +struct inode *__isofs_iget(struct super_block *sb, + unsigned long block, + unsigned long offset, + int relocated) { unsigned long hashval; struct inode *inode; @@ -1512,7 +1513,7 @@ struct inode *isofs_iget(struct super_block *sb, return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { - ret = isofs_read_inode(inode); + ret = isofs_read_inode(inode, relocated); if (ret < 0) { iget_failed(inode); inode = ERR_PTR(ret); diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 0e73f63d927..50cd5839844 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -107,7 +107,7 @@ extern int iso_date(char *, int); struct inode; /* To make gcc happy */ -extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *); +extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated); extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *); extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *); @@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, struct namei extern struct buffer_head *isofs_bread(struct inode *, sector_t); extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long); -extern struct inode *isofs_iget(struct super_block *sb, - unsigned long block, - unsigned long offset); +struct inode *__isofs_iget(struct super_block *sb, + unsigned long block, + unsigned long offset, + int relocated); + +static inline struct inode *isofs_iget(struct super_block *sb, + unsigned long block, + unsigned long offset) +{ + return __isofs_iget(sb, block, offset, 0); +} + +static inline struct inode *isofs_iget_reloc(struct super_block *sb, + unsigned long block, + unsigned long offset) +{ + return __isofs_iget(sb, block, offset, 
1); +} /* Because the inode number is no longer relevant to finding the * underlying meta-data for an inode, we are free to choose a more diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c index 70e79d0c756..ee62cc0c249 100644 --- a/fs/isofs/rock.c +++ b/fs/isofs/rock.c @@ -288,12 +288,16 @@ int get_rock_ridge_filename(struct iso_directory_record *de, goto out; } +#define RR_REGARD_XA 1 +#define RR_RELOC_DE 2 + static int parse_rock_ridge_inode_internal(struct iso_directory_record *de, - struct inode *inode, int regard_xa) + struct inode *inode, int flags) { int symlink_len = 0; int cnt, sig; + unsigned int reloc_block; struct inode *reloc; struct rock_ridge *rr; int rootflag; @@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de, init_rock_state(&rs, inode); setup_rock_ridge(de, inode, &rs); - if (regard_xa) { + if (flags & RR_REGARD_XA) { rs.chr += 14; rs.len -= 14; if (rs.len < 0) @@ -485,12 +489,22 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de, "relocated directory\n"); goto out; case SIG('C', 'L'): - ISOFS_I(inode)->i_first_extent = - isonum_733(rr->u.CL.location); - reloc = - isofs_iget(inode->i_sb, - ISOFS_I(inode)->i_first_extent, - 0); + if (flags & RR_RELOC_DE) { + printk(KERN_ERR + "ISOFS: Recursive directory relocation " + "is not supported\n"); + goto eio; + } + reloc_block = isonum_733(rr->u.CL.location); + if (reloc_block == ISOFS_I(inode)->i_iget5_block && + ISOFS_I(inode)->i_iget5_offset == 0) { + printk(KERN_ERR + "ISOFS: Directory relocation points to " + "itself\n"); + goto eio; + } + ISOFS_I(inode)->i_first_extent = reloc_block; + reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0); if (IS_ERR(reloc)) { ret = PTR_ERR(reloc); goto out; @@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit) return rpnt; } -int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode) +int parse_rock_ridge_inode(struct iso_directory_record *de, struct 
inode *inode, + int relocated) { - int result = parse_rock_ridge_inode_internal(de, inode, 0); + int flags = relocated ? RR_RELOC_DE : 0; + int result = parse_rock_ridge_inode_internal(de, inode, flags); /* * if rockridge flag was reset and we didn't look for attributes @@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode) */ if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1) && (ISOFS_SB(inode->i_sb)->s_rock == 2)) { - result = parse_rock_ridge_inode_internal(de, inode, 14); + result = parse_rock_ridge_inode_internal(de, inode, + flags | RR_REGARD_XA); } return result; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c8d9a5488e7..e3c959e8481 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2051,6 +2051,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) { struct nfs4_closedata *calldata = data; struct nfs4_state *state = calldata->state; + bool is_rdonly, is_wronly, is_rdwr; int call_close = 0; dprintk("%s: begin!\n", __func__); @@ -2058,18 +2059,24 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) return; task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; - calldata->arg.fmode = FMODE_READ|FMODE_WRITE; spin_lock(&state->owner->so_lock); + is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); + is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); + is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); + /* Calculate the current open share mode */ + calldata->arg.fmode = 0; + if (is_rdonly || is_rdwr) + calldata->arg.fmode |= FMODE_READ; + if (is_wronly || is_rdwr) + calldata->arg.fmode |= FMODE_WRITE; /* Calculate the change in open mode */ if (state->n_rdwr == 0) { if (state->n_rdonly == 0) { - call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); - call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); + call_close |= is_rdonly || is_rdwr; calldata->arg.fmode &= ~FMODE_READ; } if (state->n_wronly == 0) { - call_close |= 
test_bit(NFS_O_WRONLY_STATE, &state->flags); - call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); + call_close |= is_wronly || is_rdwr; calldata->arg.fmode &= ~FMODE_WRITE; } } diff --git a/include/asm-generic/processor.h b/include/asm-generic/processor.h new file mode 100644 index 00000000000..0b57d920cdd --- /dev/null +++ b/include/asm-generic/processor.h @@ -0,0 +1,24 @@ +/* + * include/asm-generic/processor.h + * + * Copyright (c) 2014 NVIDIA Corporation. All rights reserved. + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ASM_GENERIC_PROCESSOR_H_ +#define _ASM_GENERIC_PROCESSOR_H_ + +#include + +#ifndef cpu_read_relax +#define cpu_read_relax() cpu_relax() +#endif + +#endif /*_ASM_GENERIC_PROCESSOR_H_*/ diff --git a/include/asm-generic/relaxed.h b/include/asm-generic/relaxed.h new file mode 100644 index 00000000000..37dc310a4b4 --- /dev/null +++ b/include/asm-generic/relaxed.h @@ -0,0 +1,30 @@ +/* + * include/asm-generic/relaxed.h + * + * Copyright (c) 2014 NVIDIA Corporation. All rights reserved. + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ASM_GENERIC_RELAXED_H_ +#define _ASM_GENERIC_RELAXED_H_ + +#ifndef cpu_relaxed_read_short +#define cpu_relaxed_read_short(p) (*(p)) +#endif + +#ifndef cpu_relaxed_read +#define cpu_relaxed_read(p) (*(p)) +#endif + +#ifndef cpu_relaxed_read_long +#define cpu_relaxed_read_long(p) (*(p)) +#endif + +#endif /*_ASM_GENERIC_RELAXED_H_*/ diff --git a/include/linux/ctype.h b/include/linux/ctype.h index 8acfe312f94..653589e3e30 100644 --- a/include/linux/ctype.h +++ b/include/linux/ctype.h @@ -61,4 +61,10 @@ static inline char _tolower(const char c) return c | 0x20; } +/* Fast check for octal digit */ +static inline int isodigit(const char c) +{ + return c >= '0' && c <= '7'; +} + #endif diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index cc07d2777bb..b4542b9580a 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -5,6 +5,7 @@ * * Copyright(C) 2005, Thomas Gleixner * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2014, NVIDIA CORPORATION. All rights reserved. 
* * data type definitions, declarations, prototypes * @@ -23,6 +24,7 @@ #include #include #include +#include struct hrtimer_clock_base; struct hrtimer_cpu_base; @@ -415,6 +417,11 @@ static inline int hrtimer_callback_running(struct hrtimer *timer) return timer->state & HRTIMER_STATE_CALLBACK; } +static inline int hrtimer_callback_running_relaxed(struct hrtimer *timer) +{ + return cpu_relaxed_read_long(&timer->state) & HRTIMER_STATE_CALLBACK; +} + /* Forward a hrtimer so it expires after now: */ extern u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); diff --git a/include/linux/idr.h b/include/linux/idr.h index 52a9da29529..dffed8f9b07 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -136,7 +136,7 @@ struct ida { struct ida_bitmap *free_bitmap; }; -#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, } +#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, } #define DEFINE_IDA(name) struct ida name = IDA_INIT(name) int ida_pre_get(struct ida *ida, gfp_t gfp_mask); diff --git a/include/linux/input/doubletap2wake.h b/include/linux/input/doubletap2wake.h new file mode 100644 index 00000000000..d3f37ddbdc4 --- /dev/null +++ b/include/linux/input/doubletap2wake.h @@ -0,0 +1,27 @@ +/* + * include/linux/input/doubletap2wake.h + * + * Copyright (c) 2013, Dennis Rassmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#ifndef _LINUX_DOUBLETAP2WAKE_H +#define _LINUX_DOUBLETAP2WAKE_H + +extern int dt2w_switch; +extern struct input_dev * sweep2wake_pwrdev; + +#endif /* _LINUX_DOUBLETAP2WAKE_H */ diff --git a/include/linux/input/lge_touch_core.h b/include/linux/input/lge_touch_core.h index 10fc136930f..629d7b25275 100644 --- a/include/linux/input/lge_touch_core.h +++ b/include/linux/input/lge_touch_core.h @@ -419,7 +419,7 @@ enum{ /* Debug Mask setting */ #define TOUCH_DEBUG_PRINT (1) #define TOUCH_ERROR_PRINT (1) -#define TOUCH_INFO_PRINT (1) +//#define TOUCH_INFO_PRINT (1) #if defined(TOUCH_INFO_PRINT) #define TOUCH_INFO_MSG(fmt, args...) \ diff --git a/include/linux/input/sweep2wake.h b/include/linux/input/sweep2wake.h new file mode 100644 index 00000000000..656c40dff71 --- /dev/null +++ b/include/linux/input/sweep2wake.h @@ -0,0 +1,26 @@ +/* + * include/linux/input/sweep2wake.h + * + * Copyright (c) 2013, Dennis Rassmann + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef _LINUX_SWEEP2WAKE_H +#define _LINUX_SWEEP2WAKE_H + +extern int s2w_switch, s2w_s2sonly; + +#endif /* _LINUX_SWEEP2WAKE_H */ diff --git a/include/linux/leds-pm8xxx.h b/include/linux/leds-pm8xxx.h index 6207fe4c00c..3b08ad77b60 100644 --- a/include/linux/leds-pm8xxx.h +++ b/include/linux/leds-pm8xxx.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -72,14 +72,7 @@ enum wled_ovp_threshold { WLED_OVP_35V, WLED_OVP_32V, WLED_OVP_29V, - WLED_OVP_37V, -}; - -/*debug_mask*/ -enum{ - DEBUG_LED_NONE = 0, - DEBUG_LED_TRACE = (1U << 0), // 1 - DEBUG_LED_REG = (1U << 1), // 2 + WLED_OVP_27V, }; /** @@ -100,6 +93,7 @@ struct wled_config_data { u8 boost_curr_lim; u8 cp_select; u8 ctrl_delay_us; + u16 comp_res_val; bool dig_mod_gen_en; bool cs_out_en; bool op_fdbck; @@ -120,6 +114,7 @@ struct pm8xxx_led_config { u8 id; u8 mode; u16 max_current; + u16 pwm_adjust_brightness; int pwm_channel; u32 pwm_period_us; bool default_state; @@ -135,10 +130,12 @@ struct pm8xxx_led_config { * for each LED. 
It maps one-to-one with * array of LEDs * @num_configs - count of members of configs array + * @use_pwm - controlled by userspace */ struct pm8xxx_led_platform_data { struct led_platform_data *led_core; struct pm8xxx_led_config *configs; u32 num_configs; + int use_pwm; }; #endif /* __LEDS_PM8XXX_H__ */ diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6d0e8..30019d8fa4e 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -57,6 +57,7 @@ #include #include +#include struct llist_head { struct llist_node *first; @@ -137,6 +138,11 @@ static inline bool llist_empty(const struct llist_head *head) return ACCESS_ONCE(head->first) == NULL; } +static inline bool llist_empty_relaxed(const struct llist_head *head) +{ + return (void *)cpu_relaxed_read_long(&head->first) == NULL; +} + static inline struct llist_node *llist_next(struct llist_node *node) { return node->next; diff --git a/include/linux/mfd/pm8xxx/pwm.h b/include/linux/mfd/pm8xxx/pwm.h old mode 100755 new mode 100644 index 272aea2bcc9..604c2b1ee36 --- a/include/linux/mfd/pm8xxx/pwm.h +++ b/include/linux/mfd/pm8xxx/pwm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -84,18 +84,12 @@ struct pm8xxx_pwm_period { * start_idx - index in the LUT */ struct pm8xxx_pwm_duty_cycles { -#ifdef CONFIG_LGE_PM_PWM_LED int *duty_pcts0; int *duty_pcts1; int num_duty_pcts0; int num_duty_pcts1; int duty_ms0; int duty_ms1; -#else - int *duty_pcts; - int num_duty_pcts; - int duty_ms; -#endif int start_idx; }; diff --git a/include/linux/sched.h b/include/linux/sched.h index 8da6051f0a4..e3a044b15a4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2090,6 +2090,7 @@ extern int task_nice(const struct task_struct *p); extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); +extern int idle_cpu_relaxed(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 600060e25ec..08f342d669a 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -29,6 +29,7 @@ #include #include #include +#include typedef struct { unsigned sequence; @@ -141,9 +142,9 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s) unsigned ret; repeat: - ret = ACCESS_ONCE(s->sequence); + ret = cpu_relaxed_read((volatile u32 *)&s->sequence); if (unlikely(ret & 1)) { - cpu_relax(); + cpu_read_relax(); goto repeat; } return ret; diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index a3eb2f65b65..3eeee9672a4 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -13,4 +13,62 @@ enum string_size_units { int string_get_size(u64 size, enum string_size_units units, char *buf, int len); +#define UNESCAPE_SPACE 0x01 +#define UNESCAPE_OCTAL 0x02 +#define UNESCAPE_HEX 0x04 +#define UNESCAPE_SPECIAL 
0x08 +#define UNESCAPE_ANY \ + (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL) + +/** + * string_unescape - unquote characters in the given string + * @src: source buffer (escaped) + * @dst: destination buffer (unescaped) + * @size: size of the destination buffer (0 to unlimit) + * @flags: combination of the flags (bitwise OR): + * %UNESCAPE_SPACE: + * '\f' - form feed + * '\n' - new line + * '\r' - carriage return + * '\t' - horizontal tab + * '\v' - vertical tab + * %UNESCAPE_OCTAL: + * '\NNN' - byte with octal value NNN (1 to 3 digits) + * %UNESCAPE_HEX: + * '\xHH' - byte with hexadecimal value HH (1 to 2 digits) + * %UNESCAPE_SPECIAL: + * '\"' - double quote + * '\\' - backslash + * '\a' - alert (BEL) + * '\e' - escape + * %UNESCAPE_ANY: + * all previous together + * + * Returns amount of characters processed to the destination buffer excluding + * trailing '\0'. + * + * Because the size of the output will be the same as or less than the size of + * the input, the transformation may be performed in place. + * + * Caller must provide valid source and destination pointers. Be aware that + * destination buffer will always be NULL-terminated. Source string must be + * NULL-terminated as well. 
+ */ +int string_unescape(char *src, char *dst, size_t size, unsigned int flags); + +static inline int string_unescape_inplace(char *buf, unsigned int flags) +{ + return string_unescape(buf, buf, 0, flags); +} + +static inline int string_unescape_any(char *src, char *dst, size_t size) +{ + return string_unescape(src, dst, size, UNESCAPE_ANY); +} + +static inline int string_unescape_any_inplace(char *buf) +{ + return string_unescape_any(buf, buf, 0); +} + #endif diff --git a/include/trace/events/random.h b/include/trace/events/random.h index 805af6db41c..6de9b68407e 100644 --- a/include/trace/events/random.h +++ b/include/trace/events/random.h @@ -8,306 +8,307 @@ #include TRACE_EVENT(add_device_randomness, - TP_PROTO(int bytes, unsigned long IP), - TP_ARGS(bytes, IP), - - TP_STRUCT__entry( - __field( int, bytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->bytes = bytes; - __entry->IP = IP; - ), - - TP_printk("bytes %d caller %pF", - __entry->bytes, (void *)__entry->IP) -); + TP_PROTO(int bytes, unsigned long IP), + + TP_ARGS(bytes, IP), + + TP_STRUCT__entry( + __field( int, bytes ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->bytes = bytes; + __entry->IP = IP; + ), + + TP_printk("bytes %d caller %pF", + __entry->bytes, (void *)__entry->IP) + ); DECLARE_EVENT_CLASS(random__mix_pool_bytes, - TP_PROTO(const char *pool_name, int bytes, unsigned long IP), - - TP_ARGS(pool_name, bytes, IP), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, bytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->bytes = bytes; - __entry->IP = IP; - ), - - TP_printk("%s pool: bytes %d caller %pF", - __entry->pool_name, __entry->bytes, (void *)__entry->IP) -); + TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + + TP_ARGS(pool_name, bytes, IP), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, bytes ) + __field(unsigned long, IP ) + ), 
+ + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->bytes = bytes; + __entry->IP = IP; + ), + + TP_printk("%s pool: bytes %d caller %pF", + __entry->pool_name, __entry->bytes, (void *)__entry->IP) + ); DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, - TP_PROTO(const char *pool_name, int bytes, unsigned long IP), - - TP_ARGS(pool_name, bytes, IP) -); + TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + + TP_ARGS(pool_name, bytes, IP) + ); DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, - TP_PROTO(const char *pool_name, int bytes, unsigned long IP), - - TP_ARGS(pool_name, bytes, IP) -); + TP_PROTO(const char *pool_name, int bytes, unsigned long IP), + + TP_ARGS(pool_name, bytes, IP) + ); TRACE_EVENT(credit_entropy_bits, - TP_PROTO(const char *pool_name, int bits, int entropy_count, - int entropy_total, unsigned long IP), - - TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, bits ) - __field( int, entropy_count ) - __field( int, entropy_total ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->bits = bits; - __entry->entropy_count = entropy_count; - __entry->entropy_total = entropy_total; - __entry->IP = IP; - ), - - TP_printk("%s pool: bits %d entropy_count %d entropy_total %d " - "caller %pF", __entry->pool_name, __entry->bits, - __entry->entropy_count, __entry->entropy_total, - (void *)__entry->IP) -); + TP_PROTO(const char *pool_name, int bits, int entropy_count, + int entropy_total, unsigned long IP), + + TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, bits ) + __field( int, entropy_count ) + __field( int, entropy_total ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->bits = bits; + __entry->entropy_count = entropy_count; + __entry->entropy_total = 
entropy_total; + __entry->IP = IP; + ), + + TP_printk("%s pool: bits %d entropy_count %d entropy_total %d " + "caller %pF", __entry->pool_name, __entry->bits, + __entry->entropy_count, __entry->entropy_total, + (void *)__entry->IP) + ); TRACE_EVENT(push_to_pool, - TP_PROTO(const char *pool_name, int pool_bits, int input_bits), - - TP_ARGS(pool_name, pool_bits, input_bits), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, pool_bits ) - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->pool_bits = pool_bits; - __entry->input_bits = input_bits; - ), - - TP_printk("%s: pool_bits %d input_pool_bits %d", - __entry->pool_name, __entry->pool_bits, - __entry->input_bits) -); + TP_PROTO(const char *pool_name, int pool_bits, int input_bits), + + TP_ARGS(pool_name, pool_bits, input_bits), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, pool_bits ) + __field( int, input_bits ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->pool_bits = pool_bits; + __entry->input_bits = input_bits; + ), + + TP_printk("%s: pool_bits %d input_pool_bits %d", + __entry->pool_name, __entry->pool_bits, + __entry->input_bits) + ); TRACE_EVENT(debit_entropy, - TP_PROTO(const char *pool_name, int debit_bits), - - TP_ARGS(pool_name, debit_bits), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, debit_bits ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->debit_bits = debit_bits; - ), - - TP_printk("%s: debit_bits %d", __entry->pool_name, - __entry->debit_bits) -); + TP_PROTO(const char *pool_name, int debit_bits), + + TP_ARGS(pool_name, debit_bits), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, debit_bits ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->debit_bits = debit_bits; + ), + + TP_printk("%s: debit_bits %d", __entry->pool_name, + __entry->debit_bits) + ); 
TRACE_EVENT(add_input_randomness, - TP_PROTO(int input_bits), - - TP_ARGS(input_bits), - - TP_STRUCT__entry( - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->input_bits = input_bits; - ), - - TP_printk("input_pool_bits %d", __entry->input_bits) -); + TP_PROTO(int input_bits), + + TP_ARGS(input_bits), + + TP_STRUCT__entry( + __field( int, input_bits ) + ), + + TP_fast_assign( + __entry->input_bits = input_bits; + ), + + TP_printk("input_pool_bits %d", __entry->input_bits) + ); TRACE_EVENT(add_disk_randomness, - TP_PROTO(dev_t dev, int input_bits), - - TP_ARGS(dev, input_bits), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->dev = dev; - __entry->input_bits = input_bits; - ), - - TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev), - MINOR(__entry->dev), __entry->input_bits) -); + TP_PROTO(dev_t dev, int input_bits), + + TP_ARGS(dev, input_bits), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( int, input_bits ) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->input_bits = input_bits; + ), + + TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev), + MINOR(__entry->dev), __entry->input_bits) + ); TRACE_EVENT(xfer_secondary_pool, - TP_PROTO(const char *pool_name, int xfer_bits, int request_bits, - int pool_entropy, int input_entropy), - - TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy, - input_entropy), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, xfer_bits ) - __field( int, request_bits ) - __field( int, pool_entropy ) - __field( int, input_entropy ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->xfer_bits = xfer_bits; - __entry->request_bits = request_bits; - __entry->pool_entropy = pool_entropy; - __entry->input_entropy = input_entropy; - ), - - TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d " - "input_entropy %d", __entry->pool_name, __entry->xfer_bits, - 
__entry->request_bits, __entry->pool_entropy, - __entry->input_entropy) -); + TP_PROTO(const char *pool_name, int xfer_bits, int request_bits, + int pool_entropy, int input_entropy), + + TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy, + input_entropy), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, xfer_bits ) + __field( int, request_bits ) + __field( int, pool_entropy ) + __field( int, input_entropy ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->xfer_bits = xfer_bits; + __entry->request_bits = request_bits; + __entry->pool_entropy = pool_entropy; + __entry->input_entropy = input_entropy; + ), + + TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d " + "input_entropy %d", __entry->pool_name, __entry->xfer_bits, + __entry->request_bits, __entry->pool_entropy, + __entry->input_entropy) + ); DECLARE_EVENT_CLASS(random__get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP), - - TP_STRUCT__entry( - __field( int, nbytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->nbytes = nbytes; - __entry->IP = IP; - ), - - TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP) -); + TP_PROTO(int nbytes, unsigned long IP), + + TP_ARGS(nbytes, IP), + + TP_STRUCT__entry( + __field( int, nbytes ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->nbytes = nbytes; + __entry->IP = IP; + ), + + TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP) + ); DEFINE_EVENT(random__get_random_bytes, get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP) -); + TP_PROTO(int nbytes, unsigned long IP), + + TP_ARGS(nbytes, IP) + ); DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP) -); + TP_PROTO(int nbytes, unsigned long IP), + + TP_ARGS(nbytes, IP) + ); DECLARE_EVENT_CLASS(random__extract_entropy, - TP_PROTO(const char 
*pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, nbytes ) - __field( int, entropy_count ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->nbytes = nbytes; - __entry->entropy_count = entropy_count; - __entry->IP = IP; - ), - - TP_printk("%s pool: nbytes %d entropy_count %d caller %pF", - __entry->pool_name, __entry->nbytes, __entry->entropy_count, - (void *)__entry->IP) -); + TP_PROTO(const char *pool_name, int nbytes, int entropy_count, + unsigned long IP), + + TP_ARGS(pool_name, nbytes, entropy_count, IP), + + TP_STRUCT__entry( + __field( const char *, pool_name ) + __field( int, nbytes ) + __field( int, entropy_count ) + __field(unsigned long, IP ) + ), + + TP_fast_assign( + __entry->pool_name = pool_name; + __entry->nbytes = nbytes; + __entry->entropy_count = entropy_count; + __entry->IP = IP; + ), + + TP_printk("%s pool: nbytes %d entropy_count %d caller %pF", + __entry->pool_name, __entry->nbytes, __entry->entropy_count, + (void *)__entry->IP) + ); DEFINE_EVENT(random__extract_entropy, extract_entropy, - TP_PROTO(const char *pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP) -); + TP_PROTO(const char *pool_name, int nbytes, int entropy_count, + unsigned long IP), + + TP_ARGS(pool_name, nbytes, entropy_count, IP) + ); DEFINE_EVENT(random__extract_entropy, extract_entropy_user, - TP_PROTO(const char *pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP) -); + TP_PROTO(const char *pool_name, int nbytes, int entropy_count, + unsigned long IP), + + TP_ARGS(pool_name, nbytes, entropy_count, IP) + ); TRACE_EVENT(random_read, - TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left), - - TP_ARGS(got_bits, need_bits, pool_left, input_left), - - 
TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, need_bits ) - __field( int, pool_left ) - __field( int, input_left ) - ), - - TP_fast_assign( - __entry->got_bits = got_bits; - __entry->need_bits = need_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; - ), - - TP_printk("got_bits %d still_needed_bits %d " - "blocking_pool_entropy_left %d input_entropy_left %d", - __entry->got_bits, __entry->got_bits, __entry->pool_left, - __entry->input_left) -); + TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left), + + TP_ARGS(got_bits, need_bits, pool_left, input_left), + + TP_STRUCT__entry( + __field( int, got_bits ) + __field( int, need_bits ) + __field( int, pool_left ) + __field( int, input_left ) + ), + + TP_fast_assign( + __entry->got_bits = got_bits; + __entry->need_bits = need_bits; + __entry->pool_left = pool_left; + __entry->input_left = input_left; + ), + + TP_printk("got_bits %d still_needed_bits %d " + "blocking_pool_entropy_left %d input_entropy_left %d", + __entry->got_bits, __entry->got_bits, __entry->pool_left, + __entry->input_left) + ); TRACE_EVENT(urandom_read, - TP_PROTO(int got_bits, int pool_left, int input_left), - - TP_ARGS(got_bits, pool_left, input_left), - - TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, pool_left ) - __field( int, input_left ) - ), - - TP_fast_assign( - __entry->got_bits = got_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; - ), - - TP_printk("got_bits %d nonblocking_pool_entropy_left %d " - "input_entropy_left %d", __entry->got_bits, - __entry->pool_left, __entry->input_left) -); + TP_PROTO(int got_bits, int pool_left, int input_left), + + TP_ARGS(got_bits, pool_left, input_left), + + TP_STRUCT__entry( + __field( int, got_bits ) + __field( int, pool_left ) + __field( int, input_left ) + ), + + TP_fast_assign( + __entry->got_bits = got_bits; + __entry->pool_left = pool_left; + __entry->input_left = input_left; + ), + + 
TP_printk("got_bits %d nonblocking_pool_entropy_left %d " + "input_entropy_left %d", __entry->got_bits, + __entry->pool_left, __entry->input_left) + ); #endif /* _TRACE_RANDOM_H */ diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 8677c96fd02..67a28adc944 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -385,10 +385,19 @@ static void free_css_set_work(struct work_struct *work) struct cgroup *cgrp = link->cgrp; list_del(&link->cg_link_list); list_del(&link->cgrp_link_list); + + /* + * We may not be holding cgroup_mutex, and if cgrp->count is + * dropped to 0 the cgroup can be destroyed at any time, hence + * rcu_read_lock is used to keep it alive. + */ + rcu_read_lock(); if (atomic_dec_and_test(&cgrp->count)) { check_for_release(cgrp); cgroup_wakeup_rmdir_waiter(cgrp); } + rcu_read_unlock(); + kfree(link); } write_unlock(&css_set_lock); @@ -3975,7 +3984,7 @@ static int cgroup_css_sets_empty(struct cgroup *cgrp) read_lock(&css_set_lock); list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) { struct css_set *cg = link->cg; - if (atomic_read(&cg->refcount) > 0) { + if (cg && (atomic_read(&cg->refcount) > 0)) { retval = 0; break; } diff --git a/kernel/cpu.c b/kernel/cpu.c index 074bd76b73e..1b1ada8dcfd 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -288,8 +288,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) * * Wait for the stop thread to go away. */ - while (!idle_cpu(cpu)) - cpu_relax(); + while (!idle_cpu_relaxed(cpu)) + cpu_read_relax(); /* This actually kills the CPU. */ __cpu_die(cpu); diff --git a/kernel/kthread.c b/kernel/kthread.c index 445b456424b..ce0f1650ac1 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -32,7 +32,7 @@ struct kthread_create_info /* Result passed back to kthread_create() from kthreadd. 
*/ struct task_struct *result; - struct completion done; + struct completion *done; struct list_head list; }; @@ -177,6 +177,7 @@ static int kthread(void *_create) struct kthread_create_info *create = _create; int (*threadfn)(void *data) = create->threadfn; void *data = create->data; + struct completion *done; struct kthread self; int ret; @@ -186,10 +187,16 @@ static int kthread(void *_create) init_completion(&self.parked); current->vfork_done = &self.exited; + /* If user was SIGKILLed, I release the structure. */ + done = xchg(&create->done, NULL); + if (!done) { + kfree(create); + do_exit(-EINTR); + } /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; - complete(&create->done); + complete(done); schedule(); ret = -EINTR; @@ -209,7 +216,7 @@ int tsk_fork_get_node(struct task_struct *tsk) if (tsk == kthreadd_task) return tsk->pref_node_fork; #endif - return numa_node_id(); + return NUMA_NO_NODE; } static void create_kthread(struct kthread_create_info *create) @@ -222,8 +229,15 @@ static void create_kthread(struct kthread_create_info *create) /* We want our own signal handler (we take no signals by default). */ pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); if (pid < 0) { + /* If user was SIGKILLed, I release the structure. */ + struct completion *done = xchg(&create->done, NULL); + + if (!done) { + kfree(create); + return; + } create->result = ERR_PTR(pid); - complete(&create->done); + complete(done); } } @@ -247,43 +261,66 @@ static void create_kthread(struct kthread_create_info *create) * kthread_stop() has been called). The return value should be zero * or a negative error number; it will be passed to kthread_stop(). * - * Returns a task_struct or ERR_PTR(-ENOMEM). + * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR). */ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void *data, int node, const char namefmt[], ...) 
{ - struct kthread_create_info create; - - create.threadfn = threadfn; - create.data = data; - create.node = node; - init_completion(&create.done); + DECLARE_COMPLETION_ONSTACK(done); + struct task_struct *task; + struct kthread_create_info *create = kmalloc(sizeof(*create), + GFP_KERNEL); + + if (!create) + return ERR_PTR(-ENOMEM); + create->threadfn = threadfn; + create->data = data; + create->node = node; + create->done = &done; spin_lock(&kthread_create_lock); - list_add_tail(&create.list, &kthread_create_list); + list_add_tail(&create->list, &kthread_create_list); spin_unlock(&kthread_create_lock); wake_up_process(kthreadd_task); - wait_for_completion(&create.done); - - if (!IS_ERR(create.result)) { + /* + * Wait for completion in killable state, for I might be chosen by + * the OOM killer while kthreadd is trying to allocate memory for + * new kernel thread. + */ + if (unlikely(wait_for_completion_killable(&done))) { + /* + * If I was SIGKILLed before kthreadd (or new kernel thread) + * calls complete(), leave the cleanup of this structure to + * that thread. + */ + if (xchg(&create->done, NULL)) + return ERR_PTR(-EINTR); + /* + * kthreadd (or new kernel thread) will call complete() + * shortly. + */ + wait_for_completion(&done); + } + task = create->result; + if (!IS_ERR(task)) { static const struct sched_param param = { .sched_priority = 0 }; va_list args; va_start(args, namefmt); - vsnprintf(create.result->comm, sizeof(create.result->comm), - namefmt, args); + vsnprintf(task->comm, sizeof(task->comm), namefmt, args); va_end(args); /* * root may have changed our (kthreadd's) priority or CPU mask. * The kernel thread should not inherit these properties. 
*/ - sched_setscheduler_nocheck(create.result, SCHED_NORMAL, ¶m); - set_cpus_allowed_ptr(create.result, cpu_all_mask); + sched_setscheduler_nocheck(task, SCHED_NORMAL, ¶m); + set_cpus_allowed_ptr(task, cpu_all_mask); } - return create.result; + kfree(create); + return task; } EXPORT_SYMBOL(kthread_create_on_node); @@ -331,7 +368,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), { struct task_struct *p; - p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, + p = kthread_create_on_node(threadfn, data, cpu_to_mem(cpu), namefmt, cpu); if (IS_ERR(p)) return p; @@ -555,7 +592,7 @@ static void insert_kthread_work(struct kthread_worker *worker, list_add_tail(&work->node, pos); work->worker = worker; - if (likely(worker->task)) + if (!worker->current_work && likely(worker->task)) wake_up_process(worker->task); } diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index acbb79c9092..1a0bb1f147f 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -9,6 +9,7 @@ #include #include #include +#include /* * Called after updating RLIMIT_CPU to run cpu timer and update @@ -494,6 +495,8 @@ static void cleanup_timers(struct list_head *head, */ void posix_cpu_timers_exit(struct task_struct *tsk) { + add_device_randomness((const void*) &tsk->se.sum_exec_runtime, + sizeof(unsigned long long)); cleanup_timers(tsk->cpu_timers, tsk->utime, tsk->stime, tsk->se.sum_exec_runtime); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 85da1680fd6..ed9925c6fe3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5,6 +5,8 @@ * * Copyright (C) 1991-2002 Linus Torvalds * + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 
+ * * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and * make semaphores SMP safe * 1998-11-19 Implemented schedule_timeout() and related stuff @@ -77,6 +79,7 @@ #include #include #include +#include #ifdef CONFIG_PARAVIRT #include #endif @@ -1194,9 +1197,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! */ while (task_running(rq, p)) { - if (match_state && unlikely(p->state != match_state)) + if (match_state && unlikely(cpu_relaxed_read_long + (&(p->state)) != match_state)) return 0; - cpu_relax(); + cpu_read_relax(); } /* @@ -1626,7 +1630,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * If the owning (remote) cpu is still in the middle of schedule() with * this task as prev, wait until its done referencing the task. */ - while (p->on_cpu) { + while (cpu_relaxed_read(&(p->on_cpu))) { #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW /* * In case the architecture enables interrupts in @@ -1638,7 +1642,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) if (ttwu_activate_remote(p, wake_flags)) goto stat; #else - cpu_relax(); + cpu_read_relax(); #endif } /* @@ -3231,6 +3235,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) #endif +static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) +{ + u64 temp = (__force u64) rtime; + + temp *= (__force u64) utime; + + if (sizeof(cputime_t) == 4) + temp = div_u64(temp, (__force u32) total); + else + temp = div64_u64(temp, (__force u64) total); + + return (__force cputime_t) temp; +} + void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { cputime_t rtime, utime = p->utime, total = utime + p->stime; @@ -3240,13 +3258,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) */ rtime = nsecs_to_cputime(p->se.sum_exec_runtime); - if (total) { - u64 
temp = (__force u64) rtime; - - temp *= (__force u64) utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else + if (total) + utime = scale_utime(utime, rtime, total); + else utime = rtime; /* @@ -3273,13 +3287,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) total = cputime.utime + cputime.stime; rtime = nsecs_to_cputime(cputime.sum_exec_runtime); - if (total) { - u64 temp = (__force u64) rtime; - - temp *= (__force u64) cputime.utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else + if (total) + utime = scale_utime(cputime.utime, rtime, total); + else utime = rtime; sig->prev_utime = max(sig->prev_utime, utime); @@ -4324,6 +4334,24 @@ int idle_cpu(int cpu) return 1; } +int idle_cpu_relaxed(int cpu) +{ + struct rq *rq = cpu_rq(cpu); + + if (cpu_relaxed_read_long(&rq->curr) != rq->idle) + return 0; + + if (cpu_relaxed_read_long(&rq->nr_running)) + return 0; + +#ifdef CONFIG_SMP + if (!llist_empty_relaxed(&rq->wake_list)) + return 0; +#endif + + return 1; +} + /** * idle_task - return the idle task for a given cpu. * @cpu: the processor in question. 
@@ -6337,6 +6365,8 @@ static const struct cpumask *cpu_cpu_mask(int cpu) return cpumask_of_node(cpu_to_node(cpu)); } +int sched_smt_power_savings = 0, sched_mc_power_savings = 2; + struct sd_data { struct sched_domain **__percpu sd; struct sched_group **__percpu sg; @@ -7309,9 +7339,6 @@ void __init sched_init_smp(void) hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); - /* RT runtime code needs to handle some hotplug events */ - hotcpu_notifier(update_runtime, 0); - init_hrtick(); /* Move init over to a non-isolated CPU */ diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 526c77d1b65..62dac8ce170 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -691,15 +691,6 @@ static void __disable_runtime(struct rq *rq) } } -static void disable_runtime(struct rq *rq) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&rq->lock, flags); - __disable_runtime(rq); - raw_spin_unlock_irqrestore(&rq->lock, flags); -} - static void __enable_runtime(struct rq *rq) { rt_rq_iter_t iter; @@ -724,37 +715,6 @@ static void __enable_runtime(struct rq *rq) } } -static void enable_runtime(struct rq *rq) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&rq->lock, flags); - __enable_runtime(rq); - raw_spin_unlock_irqrestore(&rq->lock, flags); -} - -int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int cpu = (int)(long)hcpu; - - switch (action) { - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - disable_runtime(cpu_rq(cpu)); - return NOTIFY_OK; - - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - enable_runtime(cpu_rq(cpu)); - return NOTIFY_OK; - - default: - return NOTIFY_DONE; - } -} - static int balance_runtime(struct rt_rq *rt_rq) { int more = 0; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3a6c3cda30a..b2fd12d7108 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -891,7 +891,6 
@@ extern void sysrq_sched_debug_show(void); extern void sched_init_granularity(void); extern void update_max_interval(void); extern void update_group_power(struct sched_domain *sd, int cpu); -extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu); extern void init_sched_rt_class(void); extern void init_sched_fair_class(void); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e9648e06b31..1d98c4faeb1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1322,5 +1322,8 @@ source "lib/Kconfig.kgdb" source "lib/Kconfig.kmemcheck" +config TEST_STRING_HELPERS + tristate "Test functions located in the string_helpers module at runtime" + config TEST_KSTRTOX tristate "Test kstrto*() family of functions at runtime" diff --git a/lib/Makefile b/lib/Makefile index a9e4702b256..c8e3f85130b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -22,8 +22,10 @@ lib-y += kobject.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ + gcd.o lcm.o list_sort.o uuid.o flex_array.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o +obj-y += string_helpers.o +obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += kstrtox.o obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 310c753cf83..0d169f0c0ee 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -271,7 +271,6 @@ static inline int parse_lineno(const char *str, unsigned int *val) * allow the user to express a query which matches a format * containing embedded spaces. 
*/ -#define isodigit(c) ((c) >= '0' && (c) <= '7') static char *unescape(char *str) { char *in = str; diff --git a/lib/string_helpers.c b/lib/string_helpers.c index dd4ece37269..bcb557670ec 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -2,10 +2,12 @@ * Helpers for formatting and printing strings * * Copyright 31 August 2008 James Bottomley + * Copyright (C) 2013, Intel Corporation */ #include #include #include +#include #include /** @@ -66,3 +68,134 @@ int string_get_size(u64 size, const enum string_size_units units, return 0; } EXPORT_SYMBOL(string_get_size); + +static bool unescape_space(char **src, char **dst) +{ + char *p = *dst, *q = *src; + + switch (*q) { + case 'n': + *p = '\n'; + break; + case 'r': + *p = '\r'; + break; + case 't': + *p = '\t'; + break; + case 'v': + *p = '\v'; + break; + case 'f': + *p = '\f'; + break; + default: + return false; + } + *dst += 1; + *src += 1; + return true; +} + +static bool unescape_octal(char **src, char **dst) +{ + char *p = *dst, *q = *src; + u8 num; + + if (isodigit(*q) == 0) + return false; + + num = (*q++) & 7; + while (num < 32 && isodigit(*q) && (q - *src < 3)) { + num <<= 3; + num += (*q++) & 7; + } + *p = num; + *dst += 1; + *src = q; + return true; +} + +static bool unescape_hex(char **src, char **dst) +{ + char *p = *dst, *q = *src; + int digit; + u8 num; + + if (*q++ != 'x') + return false; + + num = digit = hex_to_bin(*q++); + if (digit < 0) + return false; + + digit = hex_to_bin(*q); + if (digit >= 0) { + q++; + num = (num << 4) | digit; + } + *p = num; + *dst += 1; + *src = q; + return true; +} + +static bool unescape_special(char **src, char **dst) +{ + char *p = *dst, *q = *src; + + switch (*q) { + case '\"': + *p = '\"'; + break; + case '\\': + *p = '\\'; + break; + case 'a': + *p = '\a'; + break; + case 'e': + *p = '\e'; + break; + default: + return false; + } + *dst += 1; + *src += 1; + return true; +} + +int string_unescape(char *src, char *dst, size_t size, unsigned int flags) +{ + 
char *out = dst; + + while (*src && --size) { + if (src[0] == '\\' && src[1] != '\0' && size > 1) { + src++; + size--; + + if (flags & UNESCAPE_SPACE && + unescape_space(&src, &out)) + continue; + + if (flags & UNESCAPE_OCTAL && + unescape_octal(&src, &out)) + continue; + + if (flags & UNESCAPE_HEX && + unescape_hex(&src, &out)) + continue; + + if (flags & UNESCAPE_SPECIAL && + unescape_special(&src, &out)) + continue; + + *out++ = '\\'; + } + *out++ = *src++; + } + *out = '\0'; + + return out - dst; +} +EXPORT_SYMBOL(string_unescape); diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c new file mode 100644 index 00000000000..6ac48de04c0 --- /dev/null +++ b/lib/test-string_helpers.c @@ -0,0 +1,103 @@ +/* + * Test cases for lib/string_helpers.c module. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +struct test_string { + const char *in; + const char *out; + unsigned int flags; +}; + +static const struct test_string strings[] __initconst = { + { + .in = "\\f\\ \\n\\r\\t\\v", + .out = "\f\\ \n\r\t\v", + .flags = UNESCAPE_SPACE, + }, + { + .in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777", + .out = " \001\00387\0064\005 \\8aH?7", + .flags = UNESCAPE_OCTAL, + }, + { + .in = "\\xv\\xa\\x2c\\xD\\x6f2", + .out = "\\xv\n,\ro2", + .flags = UNESCAPE_HEX, + }, + { + .in = "\\h\\\\\\\"\\a\\e\\", + .out = "\\h\\\"\a\e\\", + .flags = UNESCAPE_SPECIAL, + }, +}; + +static void __init test_string_unescape(unsigned int flags, bool inplace) +{ + char in[256]; + char out_test[256]; + char out_real[256]; + int i, p = 0, q_test = 0, q_real = sizeof(out_real); + + for (i = 0; i < ARRAY_SIZE(strings); i++) { + const char *s = strings[i].in; + int len = strlen(strings[i].in); + + /* Copy string to in buffer */ + memcpy(&in[p], s, len); + p += len; + + /* Copy expected result for given flags */ + if (flags & strings[i].flags) { + s = strings[i].out; + len = strlen(strings[i].out); + } + 
memcpy(&out_test[q_test], s, len); + q_test += len; + } + in[p++] = '\0'; + + /* Call string_unescape and compare result */ + if (inplace) { + memcpy(out_real, in, p); + if (flags == UNESCAPE_ANY) + q_real = string_unescape_any_inplace(out_real); + else + q_real = string_unescape_inplace(out_real, flags); + } else if (flags == UNESCAPE_ANY) { + q_real = string_unescape_any(in, out_real, q_real); + } else { + q_real = string_unescape(in, out_real, q_real, flags); + } + + if (q_real != q_test || memcmp(out_test, out_real, q_test)) { + pr_warn("Test failed: flags = %u\n", flags); + print_hex_dump(KERN_WARNING, "Input: ", + DUMP_PREFIX_NONE, 16, 1, in, p - 1, true); + print_hex_dump(KERN_WARNING, "Expected: ", + DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true); + print_hex_dump(KERN_WARNING, "Got: ", + DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true); + } +} + +static int __init test_string_helpers_init(void) +{ + unsigned int i; + + pr_info("Running tests...\n"); + for (i = 0; i < UNESCAPE_ANY + 1; i++) + test_string_unescape(i, false); + test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true); + + return -EINVAL; +} +module_init(test_string_helpers_init); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/mm/ksm.c b/mm/ksm.c index c9ef7988eaa..5294c16d052 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -184,10 +184,10 @@ static unsigned long ksm_pages_unshared; static unsigned long ksm_rmap_items; /* Number of pages ksmd should scan in one batch */ -static unsigned int ksm_thread_pages_to_scan = 100; +static unsigned int ksm_thread_pages_to_scan = 256; /* Milliseconds ksmd should sleep between batches */ -static unsigned int ksm_thread_sleep_millisecs = 20; +static unsigned int ksm_thread_sleep_millisecs = 1500; /* Boolean to indicate whether to use deferred timer or not */ static bool use_deferred_timer; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e696d5e7b4f..adc9271eff3 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -183,11 +183,18 @@ 
static unsigned long highmem_dirtyable_memory(unsigned long total) unsigned long x = 0; for_each_node_state(node, N_HIGH_MEMORY) { + unsigned long nr_pages; struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; - x += zone_page_state(z, NR_FREE_PAGES) + - zone_reclaimable_pages(z) - z->dirty_balance_reserve; + nr_pages = zone_page_state(z, NR_FREE_PAGES) + + zone_reclaimable_pages(z); + /* + * make sure that the number of pages for this node + * is never "negative". + */ + nr_pages -= min(nr_pages, z->dirty_balance_reserve); + x += nr_pages; } /* * Unreclaimable memory (kernel memory or anonymous memory @@ -227,7 +234,7 @@ unsigned long global_dirtyable_memory(void) x -= min(x, dirty_balance_reserve); if (!vm_highmem_is_dirtyable) - x -= highmem_dirtyable_memory(x); + x -= min(x, highmem_dirtyable_memory(x)); return x + 1; /* Ensure that we never return 0 */ } diff --git a/net/core/dev.c b/net/core/dev.c index 1d247e23a23..c095e698d82 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3709,9 +3709,8 @@ static int process_backlog(struct napi_struct *napi, int quota) #endif napi->weight = weight_p; local_irq_disable(); - while (work < quota) { + while (1) { struct sk_buff *skb; - unsigned int qlen; while ((skb = __skb_dequeue(&sd->process_queue))) { local_irq_enable(); @@ -3725,24 +3724,24 @@ static int process_backlog(struct napi_struct *napi, int quota) } rps_lock(sd); - qlen = skb_queue_len(&sd->input_pkt_queue); - if (qlen) - skb_queue_splice_tail_init(&sd->input_pkt_queue, - &sd->process_queue); - - if (qlen < quota - work) { + if (skb_queue_empty(&sd->input_pkt_queue)) { /* * Inline a custom version of __napi_complete(). * only current cpu owns and manipulates this napi, - * and NAPI_STATE_SCHED is the only possible flag set on backlog. - * we can use a plain write instead of clear_bit(), + * and NAPI_STATE_SCHED is the only possible flag set + * on backlog. 
+ * We can use a plain write instead of clear_bit(), * and we dont need an smp_mb() memory barrier. */ list_del(&napi->poll_list); napi->state = 0; + rps_unlock(sd); - quota = work + qlen; + break; } + + skb_queue_splice_tail_init(&sd->input_pkt_queue, + &sd->process_queue); rps_unlock(sd); } local_irq_enable(); diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 76cb304f3f1..fa346b265fa 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1201,7 +1201,6 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall); * key and content are both parsed by cache */ -#define isodigit(c) (isdigit(c) && c <= '7') int qword_get(char **bpp, char *dest, int bufsize) { /* return bytes copied, or -1 on error */ diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 23e9cba8d4d..59f91e3af0f 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2524,7 +2524,7 @@ static int mls_write_range_helper(struct mls_range *r, void *fp) if (!eq) buf[2] = cpu_to_le32(r->level[1].sens); - BUG_ON(items > (sizeof(buf)/sizeof(buf[0]))); + BUG_ON(items > ARRAY_SIZE(buf)); rc = put_entry(buf, sizeof(u32), items, fp); if (rc) @@ -2865,7 +2865,7 @@ static int role_write(void *vkey, void *datum, void *ptr) if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) buf[items++] = cpu_to_le32(role->bounds); - BUG_ON(items > (sizeof(buf)/sizeof(buf[0]))); + BUG_ON(items > ARRAY_SIZE(buf)); rc = put_entry(buf, sizeof(u32), items, fp); if (rc) @@ -2915,7 +2915,7 @@ static int type_write(void *vkey, void *datum, void *ptr) } else { buf[items++] = cpu_to_le32(typdatum->primary); } - BUG_ON(items > (sizeof(buf) / sizeof(buf[0]))); + BUG_ON(items > ARRAY_SIZE(buf)); rc = put_entry(buf, sizeof(u32), items, fp); if (rc) return rc; @@ -2944,7 +2944,7 @@ static int user_write(void *vkey, void *datum, void *ptr) buf[items++] = cpu_to_le32(usrdatum->value); if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) buf[items++] = cpu_to_le32(usrdatum->bounds); - BUG_ON(items > 
(sizeof(buf) / sizeof(buf[0]))); + BUG_ON(items > ARRAY_SIZE(buf)); rc = put_entry(buf, sizeof(u32), items, fp); if (rc) return rc; diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 72b20b1089d..e989e40b42c 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -1018,9 +1018,11 @@ static int context_struct_to_string(struct context *context, char **scontext, u3 if (context->len) { *scontext_len = context->len; - *scontext = kstrdup(context->str, GFP_ATOMIC); - if (!(*scontext)) - return -ENOMEM; + if (scontext) { + *scontext = kstrdup(context->str, GFP_ATOMIC); + if (!(*scontext)) + return -ENOMEM; + } return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4dac0b332a3..36284b8562f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -458,6 +458,8 @@ static void alc_fix_pll(struct hda_codec *codec) spec->pll_coef_idx); val = snd_hda_codec_read(codec, spec->pll_nid, 0, AC_VERB_GET_PROC_COEF, 0); + if (val == -1) + return; snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX, spec->pll_coef_idx); snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF, @@ -5843,6 +5845,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec) static void alc269_toggle_power_output(struct hda_codec *codec, int power_up) { int val = alc_read_coef_idx(codec, 0x04); + if (val == -1) + return; if (power_up) val |= 1 << 11; else @@ -6273,27 +6277,30 @@ static void alc269_fill_coef(struct hda_codec *codec) if ((alc_get_coef0(codec) & 0x00ff) == 0x017) { val = alc_read_coef_idx(codec, 0x04); /* Power up output pin */ - alc_write_coef_idx(codec, 0x04, val | (1<<11)); + if (val != -1) + alc_write_coef_idx(codec, 0x04, val | (1<<11)); } if ((alc_get_coef0(codec) & 0x00ff) == 0x018) { val = alc_read_coef_idx(codec, 0xd); - if ((val & 0x0c00) >> 10 != 0x1) { + if (val != -1 && (val & 0x0c00) >> 10 != 0x1) { /* Capless ramp up clock 
control */ alc_write_coef_idx(codec, 0xd, val | (1<<10)); } val = alc_read_coef_idx(codec, 0x17); - if ((val & 0x01c0) >> 6 != 0x4) { + if (val != -1 && (val & 0x01c0) >> 6 != 0x4) { /* Class D power on reset */ alc_write_coef_idx(codec, 0x17, val | (1<<7)); } } val = alc_read_coef_idx(codec, 0xd); /* Class D */ - alc_write_coef_idx(codec, 0xd, val | (1<<14)); + if (val != -1) + alc_write_coef_idx(codec, 0xd, val | (1<<14)); val = alc_read_coef_idx(codec, 0x4); /* HP */ - alc_write_coef_idx(codec, 0x4, val | (1<<11)); + if (val != -1) + alc_write_coef_idx(codec, 0x4, val | (1<<11)); } /* diff --git a/sound/soc/codecs/sound_control_3_gpl.c b/sound/soc/codecs/sound_control_3_gpl.c index 31b705c468a..b189041a349 100644 --- a/sound/soc/codecs/sound_control_3_gpl.c +++ b/sound/soc/codecs/sound_control_3_gpl.c @@ -23,9 +23,7 @@ #include #define SOUND_CONTROL_MAJOR_VERSION 3 -#define SOUND_CONTROL_MINOR_VERSION 5 - -#define REG_SZ 21 +#define SOUND_CONTROL_MINOR_VERSION 6 extern struct snd_soc_codec *fauxsound_codec_ptr; extern int wcd9xxx_hw_revision; @@ -38,9 +36,10 @@ int tabla_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value); +#define REG_SZ 25 static unsigned int cached_regs[] = {6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0 }; + 0, 0, 0, 0, 0 }; static unsigned int *cache_select(unsigned int reg) { @@ -104,6 +103,18 @@ static unsigned int *cache_select(unsigned int reg) case TABLA_A_CDC_TX10_VOL_CTL_GAIN: out = &cached_regs[20]; break; + case TABLA_A_RX_LINE_1_GAIN: + out = &cached_regs[21]; + break; + case TABLA_A_RX_LINE_2_GAIN: + out = &cached_regs[22]; + break; + case TABLA_A_RX_LINE_3_GAIN: + out = &cached_regs[23]; + break; + case TABLA_A_RX_LINE_4_GAIN: + out = &cached_regs[24]; + break; } return out; } @@ -145,6 +156,10 @@ int snd_hax_reg_access(unsigned int reg) case TABLA_A_CDC_RX5_VOL_CTL_B2_CTL: case TABLA_A_CDC_RX6_VOL_CTL_B2_CTL: case TABLA_A_CDC_RX7_VOL_CTL_B2_CTL: + case TABLA_A_RX_LINE_1_GAIN: + case 
TABLA_A_RX_LINE_2_GAIN: + case TABLA_A_RX_LINE_3_GAIN: + case TABLA_A_RX_LINE_4_GAIN: if (snd_ctrl_locked > 0) ret = 0; break; @@ -396,7 +411,7 @@ static ssize_t sound_control_rec_locked_store(struct kobject *kobj, static ssize_t sound_control_rec_locked_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", snd_ctrl_locked); + return sprintf(buf, "%d\n", snd_rec_ctrl_locked); } static struct kobj_attribute sound_reg_sel_attribute = diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c index fd04ce13903..540e30bdb79 100644 --- a/sound/soc/pxa/pxa-ssp.c +++ b/sound/soc/pxa/pxa-ssp.c @@ -779,9 +779,7 @@ static int pxa_ssp_remove(struct snd_soc_dai *dai) SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) -#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S24_LE | \ - SNDRV_PCM_FMTBIT_S32_LE) +#define PXA_SSP_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops pxa_ssp_dai_ops = { .startup = pxa_ssp_startup, diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c index 191b0c1dc3e..89e43216d56 100644 --- a/virt/kvm/iommu.c +++ b/virt/kvm/iommu.c @@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot, return pfn; } +static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) +{ + unsigned long i; + + for (i = 0; i < npages; ++i) + kvm_release_pfn_clean(pfn + i); +} + int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) { gfn_t gfn, end_gfn; @@ -121,6 +129,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) if (r) { printk(KERN_ERR "kvm_iommu_map_address:" "iommu failed to map pfn=%llx\n", pfn); + kvm_unpin_pages(kvm, pfn, page_size); goto unmap_pages; } @@ -132,7 +141,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) return 0; unmap_pages: - kvm_iommu_put_pages(kvm, slot->base_gfn, gfn); + 
kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); return r; } @@ -274,14 +283,6 @@ int kvm_iommu_map_guest(struct kvm *kvm) return r; } -static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) -{ - unsigned long i; - - for (i = 0; i < npages; ++i) - kvm_release_pfn_clean(pfn + i); -} - static void kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) {