Merge pull request #4 from moddingg33k/kitkat
fixes and suggestions
dazjo committed Aug 19, 2014
2 parents c398cdf + c5d7911 commit 546857a
Showing 15 changed files with 239 additions and 197 deletions.
3 changes: 3 additions & 0 deletions arch/arm/Kconfig
@@ -1711,6 +1711,9 @@ config HZ
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
default 100

config SCHED_HRTICK
def_bool HIGH_RES_TIMERS

config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
44 changes: 38 additions & 6 deletions arch/arm/include/asm/types.h
@@ -1,16 +1,48 @@
#ifndef __ASM_ARM_TYPES_H
#define __ASM_ARM_TYPES_H
#ifndef _ASM_TYPES_H
#define _ASM_TYPES_H

#include <asm-generic/int-ll64.h>

/*
* These aren't exported outside the kernel to avoid name space clashes
* The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
* unambiguous on ARM as you would expect. For the types below, there is a
* difference on ARM between GCC built for bare metal ARM, GCC built for glibc
* and the kernel itself, which results in build errors if you try to build with
* -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
* in order to use NEON intrinsics)
*
* As the typedefs for these types in 'stdint.h' are based on builtin defines
* supplied by GCC, we can tweak these to align with the kernel's idea of those
* types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
* source file (provided that -ffreestanding is used).
*
* int32_t uint32_t uintptr_t
* bare metal GCC long unsigned long unsigned int
* glibc GCC int unsigned int unsigned int
* kernel int unsigned int unsigned long
*/
#ifdef __KERNEL__

#define BITS_PER_LONG 32
#ifdef __INT32_TYPE__
#undef __INT32_TYPE__
#define __INT32_TYPE__ int
#endif

#endif /* __KERNEL__ */
#ifdef __UINT32_TYPE__
#undef __UINT32_TYPE__
#define __UINT32_TYPE__ unsigned int
#endif

#ifdef __UINTPTR_TYPE__
#undef __UINTPTR_TYPE__
#define __UINTPTR_TYPE__ unsigned long
#endif

/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#ifdef __KERNEL__

#define BITS_PER_LONG 32

#endif /* __KERNEL__ */
#endif /* _ASM_TYPES_H */
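
For context, here is a minimal sketch (hypothetical, not part of this commit) of the kind of translation unit the __INT32_TYPE__/__UINT32_TYPE__/__UINTPTR_TYPE__ overrides above are meant to enable: a kernel source file built with -ffreestanding that includes both <linux/types.h> and <arm_neon.h> (which pulls in <stdint.h>).

/* Hypothetical example, not part of this commit: with the overrides in
 * types.h, <stdint.h> (dragged in by <arm_neon.h>) and <linux/types.h>
 * agree on what int32_t, uint32_t and uintptr_t are, so both can be
 * included from the same file built with -ffreestanding. */
#include <linux/types.h>
#include <arm_neon.h>

static uint32x4_t splat_u32(u32 value)
{
	/* u32 (kernel) and uint32_t (stdint.h) now name the same type,
	 * so the NEON intrinsic accepts the kernel type directly. */
	return vdupq_n_u32(value);
}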
100 changes: 48 additions & 52 deletions arch/arm/lib/memset.S
@@ -14,27 +14,15 @@

.text
.align 5
.word 0

1: subs r2, r2, #4 @ 1 do we have enough
blt 5f @ 1 bytes to align with?
cmp r3, #2 @ 1
strltb r1, [r0], #1 @ 1
strleb r1, [r0], #1 @ 1
strb r1, [r0], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
/*
* The pointer is now aligned and the length is adjusted. Try doing the
* memset again.
*/

ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
bne 1b @ 1
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
/*
* we know that the pointer in r0 is aligned to a word boundary.
* we know that the pointer in ip is aligned to a word boundary.
*/
orr r1, r1, r1, lsl #8
1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
@@ -43,29 +31,28 @@ ENTRY(memset)
#if ! CALGN(1)+0

/*
* We need an extra register for this loop - save the return address and
* use the LR
* We need 2 extra registers for this loop - use r8 and the LR
*/
str lr, [sp, #-4]!
mov ip, r1
stmfd sp!, {r8, lr}
mov r8, r1
mov lr, r1

2: subs r2, r2, #64
stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time.
stmgeia r0!, {r1, r3, ip, lr}
stmgeia r0!, {r1, r3, ip, lr}
stmgeia r0!, {r1, r3, ip, lr}
stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time.
stmgeia ip!, {r1, r3, r8, lr}
stmgeia ip!, {r1, r3, r8, lr}
stmgeia ip!, {r1, r3, r8, lr}
bgt 2b
ldmeqfd sp!, {pc} @ Now <64 bytes to go.
ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
tst r2, #32
stmneia r0!, {r1, r3, ip, lr}
stmneia r0!, {r1, r3, ip, lr}
stmneia ip!, {r1, r3, r8, lr}
stmneia ip!, {r1, r3, r8, lr}
tst r2, #16
stmneia r0!, {r1, r3, ip, lr}
ldr lr, [sp], #4
stmneia ip!, {r1, r3, r8, lr}
ldmfd sp!, {r8, lr}

#else

@@ -74,54 +61,63 @@ ENTRY(memset)
* whole cache lines at once.
*/

stmfd sp!, {r4-r7, lr}
stmfd sp!, {r4-r8, lr}
mov r4, r1
mov r5, r1
mov r6, r1
mov r7, r1
mov ip, r1
mov r8, r1
mov lr, r1

cmp r2, #96
tstgt r0, #31
tstgt ip, #31
ble 3f

and ip, r0, #31
rsb ip, ip, #32
sub r2, r2, ip
movs ip, ip, lsl #(32 - 4)
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r4, r5}
tst ip, #(1 << 30)
mov ip, r1
strne r1, [r0], #4
and r8, ip, #31
rsb r8, r8, #32
sub r2, r2, r8
movs r8, r8, lsl #(32 - 4)
stmcsia ip!, {r4, r5, r6, r7}
stmmiia ip!, {r4, r5}
tst r8, #(1 << 30)
mov r8, r1
strne r1, [ip], #4

3: subs r2, r2, #64
stmgeia r0!, {r1, r3-r7, ip, lr}
stmgeia r0!, {r1, r3-r7, ip, lr}
stmgeia ip!, {r1, r3-r8, lr}
stmgeia ip!, {r1, r3-r8, lr}
bgt 3b
ldmeqfd sp!, {r4-r7, pc}
ldmeqfd sp!, {r4-r8, pc}

tst r2, #32
stmneia r0!, {r1, r3-r7, ip, lr}
stmneia ip!, {r1, r3-r8, lr}
tst r2, #16
stmneia r0!, {r4-r7}
ldmfd sp!, {r4-r7, lr}
stmneia ip!, {r4-r7}
ldmfd sp!, {r4-r8, lr}

#endif

4: tst r2, #8
stmneia r0!, {r1, r3}
stmneia ip!, {r1, r3}
tst r2, #4
strne r1, [r0], #4
strne r1, [ip], #4
/*
* When we get here, we've got less than 4 bytes to zero. We
* may have an unaligned pointer as well.
*/
5: tst r2, #2
strneb r1, [r0], #1
strneb r1, [r0], #1
strneb r1, [ip], #1
strneb r1, [ip], #1
tst r2, #1
strneb r1, [r0], #1
strneb r1, [ip], #1
mov pc, lr

6: subs r2, r2, #4 @ 1 do we have enough
blt 5b @ 1 bytes to align with?
cmp r3, #2 @ 1
strltb r1, [ip], #1 @ 1
strleb r1, [ip], #1 @ 1
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
ENDPROC(memset)
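
The behavioural point of the rewrite above is that the destination pointer is moved into ip up front ("preserve r0 as return value"), so all stores go through ip and r0 still holds the original pointer on exit. A minimal C illustration (hypothetical caller, not part of the commit) of why that matters: memset() is specified to return its first argument, and callers or compiler-generated code may use that value.

#include <string.h>

/* Hypothetical caller: relies on memset() returning the destination
 * pointer, which is why the assembly above must leave r0 untouched. */
char *zero_buffer(char *buf, size_t len)
{
	char *p = memset(buf, 0, len);	/* p must equal buf */
	return p;
}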
5 changes: 3 additions & 2 deletions arch/arm/mach-msm/acpuclock-7627.c
@@ -321,10 +321,11 @@ static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1401[] = {
};

/* 8625 PLL4 @ 1008MHz with GSM capable modem */
/* Y300-100 */
static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1008_2p0[] = {
{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
{ 0, 61440, ACPU_PLL_1, 1, 3, 7680, 3, 0, 61440 },
{ 0, 122880, ACPU_PLL_1, 1, 1, 15360, 3, 1, 61440 },
{ 1, 122880, ACPU_PLL_1, 1, 1, 15360, 3, 1, 61440 },
{ 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 1, 61440 },
{ 0, 300000, ACPU_PLL_2, 2, 3, 37500, 3, 2, 122880 },
{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
@@ -1027,7 +1028,7 @@ static void __devinit select_freq_plan(void)
* are using different clock plan based reprogramming method.
*/
if (cpu_is_msm8625() && pll_mhz[ACPU_PLL_4] == 1008) {
if (pll_mhz[ACPU_PLL_2] == 245)
if (pll_mhz[ACPU_PLL_1] == 245)
acpu_freq_tbl =
pll0_960_pll1_245_pll2_1200_pll4_1008_2p0;
else
62 changes: 62 additions & 0 deletions arch/arm/mach-msm/msm_rq_stats.c
@@ -32,6 +32,68 @@
#define DEFAULT_RQ_POLL_JIFFIES 1
#define DEFAULT_DEF_TIMER_JIFFIES 5

struct rq_data {
unsigned int rq_avg;
unsigned long rq_poll_jiffies;
unsigned long def_timer_jiffies;
unsigned long rq_poll_last_jiffy;
unsigned long rq_poll_total_jiffies;
unsigned long def_timer_last_jiffy;
unsigned int def_interval;
int64_t def_start_time;
struct attribute_group *attr_group;
struct kobject *kobj;
struct work_struct def_timer_work;
int init;
};
static struct rq_data rq_info;

static struct workqueue_struct *rq_wq;
static spinlock_t rq_lock;

void msm_update_rq_stats(void)
{
unsigned long flags, jiffy_gap;
unsigned int rq_avg;

if (!rq_info.init)
return;

jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
if (jiffy_gap >= rq_info.rq_poll_jiffies) {

spin_lock_irqsave(&rq_lock, flags);

if (!rq_info.rq_avg)
rq_info.rq_poll_total_jiffies = 0;

rq_avg = nr_running() * 10;

if (rq_info.rq_poll_total_jiffies) {
rq_avg = (rq_avg * jiffy_gap) +
(rq_info.rq_avg *
rq_info.rq_poll_total_jiffies);
do_div(rq_avg,
rq_info.rq_poll_total_jiffies + jiffy_gap);
}

rq_info.rq_avg = rq_avg;
rq_info.rq_poll_total_jiffies += jiffy_gap;
rq_info.rq_poll_last_jiffy = jiffies;

spin_unlock_irqrestore(&rq_lock, flags);
}

/*
* Wakeup user if needed
*/
jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
if (jiffy_gap >= rq_info.def_timer_jiffies) {
rq_info.def_timer_last_jiffy = jiffies;
queue_work(rq_wq, &rq_info.def_timer_work);
}
}

static void def_work_fn(struct work_struct *work)
{
int64_t diff;
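
For clarity, the averaging done in msm_update_rq_stats() above maintains a jiffy-weighted running mean of nr_running() * 10. A stand-alone sketch (hypothetical helper, not part of the commit) of the same arithmetic:

#include <stdint.h>

/* E.g. prev_avg = 20 held for 4 jiffies and a new sample of 30 over
 * 1 jiffy give (30 * 1 + 20 * 4) / (4 + 1) = 22. */
static unsigned int weighted_rq_avg(unsigned int prev_avg,
				    unsigned long prev_jiffies,
				    unsigned int sample,
				    unsigned long gap_jiffies)
{
	uint64_t num = (uint64_t)sample * gap_jiffies +
		       (uint64_t)prev_avg * prev_jiffies;

	return (unsigned int)(num / (prev_jiffies + gap_jiffies));
}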
4 changes: 4 additions & 0 deletions block/blk-merge.c
@@ -491,6 +491,10 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
return false;

/* don't merge file system requests and sanitize requests */
if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
return false;

/* different data direction or already started, don't merge */
if (bio_data_dir(bio) != rq_data_dir(rq))
return false;
38 changes: 1 addition & 37 deletions block/elevator.c
@@ -72,43 +72,7 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
*/
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
if (!rq_mergeable(rq))
return 0;

/*
* Don't merge file system requests and discard requests
*/
if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
return 0;

/*
* Don't merge discard requests and secure discard requests
*/
if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
return 0;

/*
* Don't merge sanitize requests
*/
if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
return 0;

/*
* different data direction or already started, don't merge
*/
if (bio_data_dir(bio) != rq_data_dir(rq))
return 0;

/*
* must be same device and not a special request
*/
if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
return 0;

/*
* only merge integrity protected bio into ditto rq
*/
if (bio_integrity(bio) != blk_integrity_rq(rq))
if (!blk_rq_merge_ok(rq, bio))
return 0;

if (!elv_iosched_allow_merge(rq, bio))
