Merge branch 'various_JB' into glados-cfs_JB

2 parents 91186ef + 2e536ea commit 213e254aa38a9876cdc92924ea817937bb1ea62a @Ezekeel committed Jul 25, 2012
Showing 54 changed files with 4,366 additions and 490 deletions.
  1. +2 −0 Documentation/00-INDEX
  2. +183 −0 Documentation/crc32.txt
  3. +6 −3 Makefile
  4. +1 −2 arch/arm/Kconfig
  5. +138 −0 arch/arm/include/asm/rwsem.h
  6. +75 −0 arch/arm/mach-omap2/Kconfig
  7. +2 −0 arch/arm/mach-omap2/Makefile
  8. +5 −1 arch/arm/mach-omap2/board-tuna-connector.c
  9. +21 −0 arch/arm/mach-omap2/board-tuna.c
  10. +1 −1 arch/arm/mach-omap2/clock44xx_data.c
  11. +11 −3 arch/arm/mach-omap2/cpuidle44xx.c
  12. +70 −0 arch/arm/mach-omap2/fastchg.c
  13. +23 −2 arch/arm/mach-omap2/omap2plus-cpufreq.c
  14. +4 −0 arch/arm/mach-omap2/omap4_trim_quirks.c
  15. +14 −0 arch/arm/mach-omap2/omap_opp_data.h
  16. +115 −0 arch/arm/mach-omap2/opp4xxx_data.c
  17. +38 −1 arch/arm/mach-omap2/sr_device.c
  18. +4 −0 arch/arm/mach-omap2/voltage.h
  19. +1 −0 arch/x86/include/asm/thread_info.h
  20. +1 −0 crypto/Kconfig
  21. +3 −91 crypto/crc32c.c
  22. +9 −0 drivers/misc/fsa9480.c
  23. +8 −0 drivers/net/wireless/bcmdhd/Kconfig
  24. +12 −0 drivers/net/wireless/bcmdhd/dhd_linux.c
  25. +6 −0 drivers/video/omap2/dss/Kconfig
  26. +51 −0 drivers/video/omap2/dss/dispc.c
  27. +3 −0 drivers/video/omap2/dss/dispc.h
  28. +3 −0 drivers/video/omap2/dss/dss.h
  29. +707 −0 drivers/video/omap2/dss/gammatable.h
  30. +51 −1 drivers/video/omap2/dss/manager.c
  31. +2 −0 include/linux/crc32.h
  32. +21 −0 include/linux/fastchg.h
  33. +7 −1 include/linux/hardirq.h
  34. +80 −0 include/linux/jrcu.h
  35. +9 −2 include/linux/kernel.h
  36. +226 −0 include/linux/memcopy.h
  37. +1 −1 include/linux/mm.h
  38. +29 −2 include/linux/preempt.h
  39. +2 −0 include/linux/rcupdate.h
  40. +2 −0 include/linux/sched.h
  41. +3 −0 include/video/omapdss.h
  42. +69 −0 init/Kconfig
  43. +1 −0 kernel/Makefile
  44. +772 −0 kernel/jrcu.c
  45. +41 −2 kernel/sched.c
  46. +50 −4 lib/Kconfig
  47. +2 −1 lib/Makefile
  48. +953 −326 lib/crc32.c
  49. +48 −8 lib/crc32defs.h
  50. +61 −20 lib/gen_crc32table.c
  51. +403 −0 lib/memcopy.c
  52. +13 −17 lib/string.c
  53. +0 −1 scripts/setlocalversion
  54. +3 −0 security/smc/Makefile
Documentation/00-INDEX
@@ -104,6 +104,8 @@ cpuidle/
- info on CPU_IDLE, CPU idle state management subsystem.
cputopology.txt
- documentation on how CPU topology info is exported via sysfs.
+crc32.txt
+ - brief tutorial on CRC computation
cris/
- directory with info about Linux on CRIS architecture.
crypto/
Documentation/crc32.txt
@@ -0,0 +1,183 @@
+A brief CRC tutorial.
+
+A CRC is a long-division remainder. You add the CRC to the message,
+and the whole thing (message+CRC) is a multiple of the given
+CRC polynomial. To check the CRC, you can either check that the
+CRC matches the recomputed value, *or* you can check that the
+remainder computed on the message+CRC is 0. This latter approach
+is used by a lot of hardware implementations, and is why so many
+protocols put the end-of-frame flag after the CRC.
+
+It's actually the same long division you learned in school, except that
+- We're working in binary, so the digits are only 0 and 1, and
+- When dividing polynomials, there are no carries. Rather than add and
+ subtract, we just xor. Thus, we tend to get a bit sloppy about
+ the difference between adding and subtracting.
+
+Like all division, the remainder is always smaller than the divisor.
+To produce a 32-bit CRC, the divisor is actually a 33-bit CRC polynomial.
+Since it's 33 bits long, bit 32 is always going to be set, so usually the
+polynomial is written in hex with the most significant bit omitted. (If
+you're familiar with the IEEE 754 floating-point format, it's the same
+idea.) The standard CRC-32 polynomial, for example, is written 0x04c11db7
+(or 0xedb88320 in the bit-reversed form used by little-endian code).
+
+Note that a CRC is computed over a string of *bits*, so you have
+to decide on the endianness of the bits within each byte. To get
+the best error-detecting properties, this should correspond to the
+order they're actually sent. For example, standard RS-232 serial is
+little-endian; the most significant bit (sometimes used for parity)
+is sent last. And when appending a CRC word to a message, you should
+do it in the right order, matching the endianness.
+
+Just like with ordinary division, you proceed one digit (bit) at a time.
+Each step of the division, you take one more digit (bit) of the
+dividend and append it to the current remainder. Then you figure out the
+appropriate multiple of the divisor to subtract to bring the remainder
+back into range. In binary, this is easy - it has to be either 0 or 1,
+and to make the XOR cancel, it's just a copy of bit 32 of the remainder.
+
+When computing a CRC, we don't care about the quotient, so we can
+throw the quotient bit away, but subtract the appropriate multiple of
+the polynomial from the remainder and we're back to where we started,
+ready to process the next bit.
+
+A big-endian CRC written this way would be coded like:
+for (i = 0; i < input_bits; i++) {
+ multiple = remainder & 0x80000000 ? CRCPOLY : 0;
+ remainder = (remainder << 1 | next_input_bit()) ^ multiple;
+}
+
+Notice how, to get at bit 32 of the shifted remainder, we look
+at bit 31 of the remainder *before* shifting it.
+
+But also notice how the next_input_bit() bits we're shifting into
+the remainder don't actually affect any decision-making until
+32 bits later. Thus, the first 32 cycles of this are pretty boring.
+Also, to add the CRC to a message, we need a 32-bit-long hole for it at
+the end, so we have to add 32 extra cycles shifting in zeros at the
+end of every message.
+
+These details lead to a standard trick: rearrange merging in the
+next_input_bit() until the moment it's needed. Then the first 32 cycles
+can be precomputed, and merging in the final 32 zero bits to make room
+for the CRC can be skipped entirely. This changes the code to:
+
+for (i = 0; i < input_bits; i++) {
+ remainder ^= next_input_bit() << 31;
+ multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ remainder = (remainder << 1) ^ multiple;
+}
+
+With this optimization, the little-endian code is particularly simple:
+for (i = 0; i < input_bits; i++) {
+ remainder ^= next_input_bit();
+ multiple = (remainder & 1) ? CRCPOLY : 0;
+ remainder = (remainder >> 1) ^ multiple;
+}
+
+The most significant coefficient of the remainder polynomial is stored
+in the least significant bit of the binary "remainder" variable.
+The other details of endianness have been hidden in CRCPOLY (which must
+be bit-reversed) and next_input_bit().
+
+As long as next_input_bit is returning the bits in a sensible order, we don't
+*have* to wait until the last possible moment to merge in additional bits.
+We can do it 8 bits at a time rather than 1 bit at a time:
+for (i = 0; i < input_bytes; i++) {
+ remainder ^= next_input_byte() << 24;
+ for (j = 0; j < 8; j++) {
+ multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ remainder = (remainder << 1) ^ multiple;
+ }
+}
+
+Or in little-endian:
+for (i = 0; i < input_bytes; i++) {
+ remainder ^= next_input_byte();
+ for (j = 0; j < 8; j++) {
+ multiple = (remainder & 1) ? CRCPOLY : 0;
+ remainder = (remainder >> 1) ^ multiple;
+ }
+}
+
+If the input is a multiple of 32 bits, you can even XOR in a 32-bit
+word at a time and increase the inner loop count to 32.
+
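For illustration (not part of the patch text), that word-at-a-time variant
might look like the following sketch, where next_input_word() is a
hypothetical helper returning the next 32 input bits in little-endian order:

for (i = 0; i < input_words; i++) {
	remainder ^= next_input_word();
	for (j = 0; j < 32; j++) {
		multiple = (remainder & 1) ? CRCPOLY : 0;
		remainder = (remainder >> 1) ^ multiple;
	}
}
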
+You can also mix and match the two loop styles, for example doing the
+bulk of a message byte-at-a-time and adding bit-at-a-time processing
+for any fractional bytes at the end.
+
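A sketch of that mix-and-match approach (again illustrative, not from the
patch), with trailing_bits a hypothetical count of leftover bits remaining
after the whole bytes have been consumed:

/* bulk of the message, byte at a time */
for (i = 0; i < input_bytes; i++) {
	remainder ^= next_input_byte();
	for (j = 0; j < 8; j++) {
		multiple = (remainder & 1) ? CRCPOLY : 0;
		remainder = (remainder >> 1) ^ multiple;
	}
}
/* fractional byte at the end, bit at a time */
for (j = 0; j < trailing_bits; j++) {
	remainder ^= next_input_bit();
	multiple = (remainder & 1) ? CRCPOLY : 0;
	remainder = (remainder >> 1) ^ multiple;
}
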
+To reduce the number of conditional branches, software commonly uses
+the byte-at-a-time table method, popularized by Dilip V. Sarwate,
+"Computation of Cyclic Redundancy Checks via Table Look-Up", Comm. ACM
+v.31 no.8 (August 1988), pp. 1008-1013.
+
+Here, rather than just shifting one bit of the remainder to decide
+on the correct multiple to subtract, we can shift a byte at a time.
+This produces a 40-bit (rather than a 33-bit) intermediate remainder,
+and the correct multiple of the polynomial to subtract is found using
+a 256-entry lookup table indexed by the high 8 bits.
+
+(The table entries are simply the CRC-32 of the given one-byte messages.)
+
+When space is more constrained, smaller tables can be used, e.g. two
+4-bit shifts followed by a lookup in a 16-entry table.
+
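Returning to the full 256-entry table, a little-endian sketch of the table
method (illustrative, not from the patch), where table[] is a 256-entry
u32 array generated once using the bit-at-a-time loop:

/* one-time setup: each entry is the CRC of a one-byte message */
for (i = 0; i < 256; i++) {
	remainder = i;
	for (j = 0; j < 8; j++) {
		multiple = (remainder & 1) ? CRCPOLY : 0;
		remainder = (remainder >> 1) ^ multiple;
	}
	table[i] = remainder;
}

/* main loop: one look-up replaces eight conditional shift steps */
for (i = 0; i < input_bytes; i++)
	remainder = (remainder >> 8) ^
		    table[(remainder ^ next_input_byte()) & 0xff];
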
+It is not practical to process much more than 8 bits at a time using this
+technique, because tables larger than 256 entries use too much memory and,
+more importantly, too much of the L1 cache.
+
+To get higher software performance, a "slicing" technique can be used.
+See "High Octane CRC Generation with the Intel Slicing-by-8 Algorithm",
+ftp://download.intel.com/technology/comms/perfnet/download/slicing-by-8.pdf
+
+This does not change the number of table lookups, but does increase
+the parallelism. With the classic Sarwate algorithm, each table lookup
+must be completed before the index of the next can be computed.
+
+A "slicing by 2" technique would shift the remainder 16 bits at a time,
+producing a 48-bit intermediate remainder. Rather than doing a single
+lookup in a 65536-entry table, the two high bytes are looked up in
+two different 256-entry tables. Each contains the remainder required
+to cancel out the corresponding byte. The tables are different because the
+polynomials to cancel are different. One has non-zero coefficients from
+x^32 to x^39, while the other goes from x^40 to x^47.
+
+Since modern processors can handle many parallel memory operations, this
+takes barely longer than a single table look-up and thus performs almost
+twice as fast as the basic Sarwate algorithm.
+
+This can be extended to "slicing by 4" using 4 256-entry tables.
+Each step, 32 bits of data is fetched, XORed with the CRC, and the result
+broken into bytes and looked up in the tables. Because the 32-bit shift
+leaves the low-order bits of the intermediate remainder zero, the
+final CRC is simply the XOR of the 4 table look-ups.
+
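A sketch of slicing-by-4 (illustrative, not from the patch), assuming a
little-endian CPU so the 32-bit load matches the byte order, with
u32 tab[4][256] where tab[0] is the Sarwate table from above:

/* setup: push each entry of the previous table through one more byte */
for (i = 0; i < 256; i++) {
	tab[1][i] = (tab[0][i] >> 8) ^ tab[0][tab[0][i] & 0xff];
	tab[2][i] = (tab[1][i] >> 8) ^ tab[0][tab[1][i] & 0xff];
	tab[3][i] = (tab[2][i] >> 8) ^ tab[0][tab[2][i] & 0xff];
}

/* main loop: 4 independent look-ups per 32 bits of input */
while (len >= 4) {
	remainder ^= *(u32 *)p;
	p += 4;
	len -= 4;
	remainder = tab[3][remainder & 0xff] ^
		    tab[2][(remainder >> 8) & 0xff] ^
		    tab[1][(remainder >> 16) & 0xff] ^
		    tab[0][remainder >> 24];
}
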
+But this still enforces sequential execution: a second group of table
+look-ups cannot begin until the previous group's 4 table look-ups have all
+been completed. Thus, the processor's load/store unit is sometimes idle.
+
+To make maximum use of the processor, "slicing by 8" performs 8 look-ups
+in parallel. Each step, the 32-bit CRC is shifted 64 bits and XORed
+with 64 bits of input data. What is important to note is that 4 of
+those 8 bytes are simply copies of the input data; they do not depend
+on the previous CRC at all. Thus, those 4 table look-ups may commence
+immediately, without waiting for the previous loop iteration.
+
+By always having 4 loads in flight, a modern superscalar processor can
+be kept busy and make full use of its L1 cache.
+
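One step of that slicing-by-8 loop might look like this (illustrative, not
from the patch; same assumptions as the slicing-by-4 sketch, extended to
u32 tab[8][256], with q and r as u32 scratch words). Note that the four
tab[0..3] look-ups use only input data, so they can start immediately:

q = remainder ^ *(u32 *)p;	/* these 4 bytes depend on the old CRC */
r = *(u32 *)(p + 4);		/* these 4 bytes are pure input data */
p += 8;
remainder = tab[7][q & 0xff] ^ tab[6][(q >> 8) & 0xff] ^
	    tab[5][(q >> 16) & 0xff] ^ tab[4][q >> 24] ^
	    tab[3][r & 0xff] ^ tab[2][(r >> 8) & 0xff] ^
	    tab[1][(r >> 16) & 0xff] ^ tab[0][r >> 24];
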
+Two more details about CRC implementation in the real world:
+
+Normally, appending zero bits to a message which is already a multiple
+of a polynomial produces a larger multiple of that polynomial. Thus,
+a basic CRC will not detect appended zero bits (or bytes). To enable
+a CRC to detect this condition, it's common to invert the CRC before
+appending it. This makes the remainder of the message+CRC come out not
+as zero, but as some fixed non-zero value. (That value is the CRC of
+the inversion pattern, 0xffffffff.)
+
+The same problem applies to zero bits prepended to the message, and a
+similar solution is used. Instead of starting the CRC computation with
+a remainder of 0, an initial remainder of all ones is used. As long as
+you start the same way on decoding, it doesn't make a difference.
+
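Combining both conventions, a hypothetical one-shot routine might read as
follows (the routines in lib/crc32.c instead take the starting value as an
argument, leaving the inversions to the caller); table[] is the 256-entry
Sarwate table from earlier:

u32 crc32_classic(const unsigned char *p, size_t len)
{
	u32 remainder = 0xffffffff;	/* all-ones initial remainder */

	while (len--)
		remainder = (remainder >> 8) ^
			    table[(remainder ^ *p++) & 0xff];
	return remainder ^ 0xffffffff;	/* invert before appending */
}
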
Makefile
@@ -347,10 +347,10 @@ CHECK = sparse
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void $(CF)
-CFLAGS_MODULE =
+CFLAGS_MODULE = -DMODULE -mcpu=cortex-a9 -mfpu=neon -funswitch-loops -fpredictive-commoning -fgcse-after-reload -ftree-vectorize -fipa-cp-clone -fsingle-precision-constant -pipe
AFLAGS_MODULE =
LDFLAGS_MODULE =
-CFLAGS_KERNEL =
+CFLAGS_KERNEL = -funswitch-loops -fpredictive-commoning -fgcse-after-reload -ftree-vectorize -fipa-cp-clone -fsingle-precision-constant -pipe
AFLAGS_KERNEL =
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
@@ -368,7 +368,10 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common \
-Werror-implicit-function-declaration \
-Wno-format-security \
- -fno-delete-null-pointer-checks
+ -fno-delete-null-pointer-checks \
+ -mfpu=neon \
+ -mcpu=cortex-a9 \
+ -mno-unaligned-access
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__
arch/arm/Kconfig
@@ -137,10 +137,9 @@ config GENERIC_LOCKBREAK
config RWSEM_GENERIC_SPINLOCK
bool
- default y
config RWSEM_XCHGADD_ALGORITHM
- bool
+ def_bool y
config ARCH_HAS_ILOG2_U32
bool
arch/arm/include/asm/rwsem.h
@@ -0,0 +1,138 @@
+/* rwsem.h: R/W semaphores implemented using ARM atomic functions.
+ *
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _ASM_ARM_RWSEM_H
+#define _ASM_ARM_RWSEM_H
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
+#endif
+
+#ifdef __KERNEL__
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+#define RWSEM_ACTIVE_BIAS 0x00000001
+#define RWSEM_ACTIVE_MASK 0x0000ffff
+#define RWSEM_WAITING_BIAS (-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (atomic_inc_return((atomic_t *)(&sem->count)) < 0)
+ rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ while ((tmp = sem->count) >= 0) {
+ if (tmp == cmpxchg(&sem->count, tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count));
+ if (tmp != RWSEM_ACTIVE_WRITE_BIAS)
+ rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_dec_return((atomic_t *)(&sem->count));
+ if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count)) < 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+{
+ atomic_add(delta, (atomic_t *)(&sem->count));
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+ __down_write(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+{
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARM_RWSEM_H */
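
For context, a minimal sketch (not part of the patch) of how kernel code
reaches these fast paths through the generic linux/rwsem.h interface;
example_sem and example_reader() are hypothetical:

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static int example_reader(void)
{
	int v;

	down_read(&example_sem);	/* fast path calls __down_read() */
	v = 42;				/* read state the semaphore protects */
	up_read(&example_sem);
	return v;
}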