Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  arch/tile: don't validate CROSS_COMPILE needlessly
  arch/tile: export only COMMAND_LINE_SIZE to userspace.
  arch/tile: rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN
  arch/tile: Rename the hweight() implementations to __arch_hweight()
  arch/tile: extend syscall ABI to set r1 on return as well.
  arch/tile: Various cleanups.
  arch/tile: support backtracing on TILE-Gx
  arch/tile: Fix a couple of issues with the COMPAT code for TILE-Gx.
  arch/tile: Use separate, better minsec values for clocksource and sched_clock.
  arch/tile: correct a bug in freeing bootmem by VA for the optional second initrd.
  arch: tile: mm: pgtable.c: Removed duplicated #include
  arch: tile: kernel/proc.c Removed duplicated #include
  Add fanotify syscalls to <asm-generic/unistd.h>.
  arch/tile: support new kunmap_atomic() naming convention.
  tile: remove unused ISA_DMA_THRESHOLD define

Conflicts in arch/tile/configs/tile_defconfig (pick the mainline version
with the reduced defconfig).
torvalds committed Aug 16, 2010
2 parents d782437 + a5854dd commit 7a1b29a
Showing 30 changed files with 218 additions and 206 deletions.
20 changes: 11 additions & 9 deletions arch/tile/Makefile
@@ -8,20 +8,22 @@
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture

ifeq ($(CROSS_COMPILE),)
# If building with TILERA_ROOT set (i.e. using the Tilera Multicore
# Development Environment) we can set CROSS_COMPILE based on that.
ifdef TILERA_ROOT
CROSS_COMPILE = $(TILERA_ROOT)/bin/tile-
endif
endif

# If we're not cross-compiling, make sure we're on the right architecture.
# Only bother to test for a few common targets, to avoid useless errors.
ifeq ($(CROSS_COMPILE),)
HOST_ARCH = $(shell uname -m)
ifneq ($(HOST_ARCH),$(ARCH))
ifdef TILERA_ROOT
CROSS_COMPILE := $(TILERA_ROOT)/bin/tile-
else
goals := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), all)
ifneq ($(strip $(filter vmlinux modules all,$(goals))),)
HOST_ARCH := $(shell uname -m)
ifneq ($(HOST_ARCH),$(ARCH))
$(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH))
endif
endif
endif
endif
endif


4 changes: 1 addition & 3 deletions arch/tile/include/arch/abi.h
@@ -59,9 +59,7 @@
* The ABI requires callers to allocate a caller state save area of
* this many bytes at the bottom of each stack frame.
*/
#ifdef __tile__
#define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__)
#endif
#define C_ABI_SAVE_AREA_SIZE (2 * (CHIP_WORD_SIZE() / 8))

/**
* The operand to an 'info' opcode directing the backtracer to not
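
The new C_ABI_SAVE_AREA_SIZE is derived from CHIP_WORD_SIZE(), which reports the word size in bits, instead of __SIZEOF_POINTER__, so the constant is also available when the header is used outside a __tile__ compiler (as the dropped #ifdef guard shows). A minimal sketch of the arithmetic, assuming a 32-bit word for TILEPro and a 64-bit word for TILE-Gx:

/* Sketch only: CHIP_WORD_SIZE() is assumed to report bits per word. */
#include <assert.h>

#define SAVE_AREA_BYTES(word_bits)  (2 * ((word_bits) / 8))

int main(void)
{
	assert(SAVE_AREA_BYTES(32) == 8);    /* TILEPro: two 4-byte words */
	assert(SAVE_AREA_BYTES(64) == 16);   /* TILE-Gx: two 8-byte words */
	return 0;
}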
37 changes: 0 additions & 37 deletions arch/tile/include/asm/atomic_32.h
@@ -255,43 +255,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
#define smp_mb__after_atomic_dec() do { } while (0)
#define smp_mb__after_atomic_inc() do { } while (0)


/*
* Support "tns" atomic integers. These are atomic integers that can
* hold any value but "1". They are more efficient than regular atomic
* operations because the "lock" (aka acquire) step is a single "tns"
* in the uncontended case, and the "unlock" (aka release) step is a
* single "store" without an mf. (However, note that on tilepro the
* "tns" will evict the local cache line, so it's not all upside.)
*
* Note that you can ONLY observe the value stored in the pointer
* using these operations; a direct read of the value may confusingly
* return the special value "1".
*/

int __tns_atomic_acquire(atomic_t *);
void __tns_atomic_release(atomic_t *p, int v);

static inline void tns_atomic_set(atomic_t *v, int i)
{
__tns_atomic_acquire(v);
__tns_atomic_release(v, i);
}

static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
{
int ret = __tns_atomic_acquire(v);
__tns_atomic_release(v, (ret == o) ? n : ret);
return ret;
}

static inline int tns_atomic_xchg(atomic_t *v, int n)
{
int ret = __tns_atomic_acquire(v);
__tns_atomic_release(v, n);
return ret;
}

#endif /* !__ASSEMBLY__ */

/*
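
The removed comment describes the "tns" integers precisely: the acquire step is a single test-and-set that parks the value 1 in the word, and the release step is a plain store of the real value, which is why a raw read can transiently observe 1. A conceptual model in C11 atomics (an illustration of the described semantics, not the kernel implementation):

/* Conceptual model only.  A tns-style integer may hold any value except 1;
 * 1 marks "locked", so a direct load can transiently observe it. */
#include <stdatomic.h>

static int tns_acquire(atomic_int *p)
{
	int old;

	/* Keep swapping 1 in until we pull out something other than 1. */
	while ((old = atomic_exchange(p, 1)) == 1)
		;
	return old;
}

static void tns_release(atomic_int *p, int v)
{
	atomic_store(p, v);	/* publishing the real value is the unlock */
}

A set is then just an acquire followed by a release of the new value, which is the shape of the tns_atomic_set() helper removed above.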
4 changes: 3 additions & 1 deletion arch/tile/include/asm/backtrace.h
@@ -21,7 +21,9 @@

#include <arch/chip.h>

#if CHIP_VA_WIDTH() > 32
#if defined(__tile__)
typedef unsigned long VirtualAddress;
#elif CHIP_VA_WIDTH() > 32
typedef unsigned long long VirtualAddress;
#else
typedef unsigned int VirtualAddress;
9 changes: 5 additions & 4 deletions arch/tile/include/asm/bitops.h
@@ -98,26 +98,27 @@ static inline int fls64(__u64 w)
return (sizeof(__u64) * 8) - __builtin_clzll(w);
}

static inline unsigned int hweight32(unsigned int w)
static inline unsigned int __arch_hweight32(unsigned int w)
{
return __builtin_popcount(w);
}

static inline unsigned int hweight16(unsigned int w)
static inline unsigned int __arch_hweight16(unsigned int w)
{
return __builtin_popcount(w & 0xffff);
}

static inline unsigned int hweight8(unsigned int w)
static inline unsigned int __arch_hweight8(unsigned int w)
{
return __builtin_popcount(w & 0xff);
}

static inline unsigned long hweight64(__u64 w)
static inline unsigned long __arch_hweight64(__u64 w)
{
return __builtin_popcountll(w);
}

#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
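
Renaming the helpers to __arch_hweight*() and pulling in <asm-generic/bitops/const_hweight.h> lets the generic layer compute popcounts of constant arguments at compile time and fall back to these builtin-based helpers otherwise. A quick, self-contained check of the popcount arithmetic the renamed helpers perform (sketch, not kernel code):

/* Sketch: the same popcount arithmetic as the __arch_hweight helpers. */
#include <assert.h>

int main(void)
{
	assert(__builtin_popcount(0xffu) == 8);              /* hweight8(0xff)     */
	assert(__builtin_popcount(0xf0f0u & 0xffff) == 8);   /* hweight16(0xf0f0)  */
	assert(__builtin_popcount(0x80000001u) == 2);        /* hweight32          */
	assert(__builtin_popcountll(1ull << 63) == 1);       /* hweight64, top bit */
	return 0;
}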
5 changes: 2 additions & 3 deletions arch/tile/include/asm/cache.h
@@ -27,11 +27,10 @@
#define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)

/*
* TILE-Gx is fully coherents so we don't need to define
* ARCH_KMALLOC_MINALIGN.
* TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN.
*/
#ifndef __tilegx__
#define ARCH_KMALLOC_MINALIGN L2_CACHE_BYTES
#define ARCH_DMA_MINALIGN L2_CACHE_BYTES
#endif

/* use the cache line size for the L2, which is where it counts */
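
This follows the tree-wide rename of ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN; on TILEPro (the non-coherent case) the minimum DMA alignment stays at the L2 line size. A typical, hedged use of the macro is to keep a device-visible buffer from sharing a cache line with unrelated fields:

/* Illustrative use only.  ARCH_DMA_MINALIGN normally comes from
 * <asm/cache.h>; a stand-in value is used so the example stands alone. */
#include <stdint.h>

#define ARCH_DMA_MINALIGN 64	/* stand-in for L2_CACHE_BYTES on TILEPro */

struct my_dev_buf {
	uint32_t flags;		/* CPU-only bookkeeping */
	uint8_t dma_area[256]
		__attribute__((aligned(ARCH_DMA_MINALIGN)));	/* device-visible */
};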
2 changes: 1 addition & 1 deletion arch/tile/include/asm/highmem.h
@@ -60,7 +60,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)

void kunmap_atomic(void *kvaddr, enum km_type type);
void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
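
The prototype change tracks the new generic naming convention (the "support new kunmap_atomic() naming convention" patch above): the architecture now exports kunmap_atomic_notypecheck(), and generic code wraps it in a kunmap_atomic() macro whose job is to catch callers that pass a struct page * instead of the mapped kernel address. A hedged sketch of that wrapping idea (not the exact generic macro):

/* Sketch of the wrapper idea only; the real macro lives in generic
 * highmem code and may differ in detail. */
#define kunmap_atomic(addr, type) do {					\
	BUILD_BUG_ON(__same_type((addr), struct page *));	/* want kvaddr */ \
	kunmap_atomic_notypecheck((addr), (type));			\
} while (0)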
6 changes: 5 additions & 1 deletion arch/tile/include/asm/page.h
@@ -129,6 +129,11 @@ static inline u64 pmd_val(pmd_t pmd)

#endif

static inline __attribute_const__ int get_order(unsigned long size)
{
return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

#endif /* !__ASSEMBLY__ */

#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
@@ -332,7 +337,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* __KERNEL__ */

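
The new inline replaces the <asm-generic/getorder.h> include (dropped in the second hunk) with a count-leading-zeros formulation: it returns the smallest order such that 2^order pages cover size. A worked check of the formula on a 64-bit host; PAGE_SHIFT of 12 is assumed purely for the example, and sizes of at most one page are skipped because the expression then passes 0 to clz, which __builtin_clzl() leaves undefined on a host:

/* Sketch of the get_order() arithmetic on a 64-bit host. */
#include <assert.h>

#define PAGE_SHIFT 12		/* assumed for the example */
#define BITS_PER_LONG 64

static int get_order(unsigned long size)
{
	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

int main(void)
{
	assert(get_order(4097) == 1);    /* needs 2 pages     -> order 1 */
	assert(get_order(32768) == 3);   /* exactly 8 pages   -> order 3 */
	assert(get_order(32769) == 4);   /* 8 pages + 1 byte  -> order 4 */
	return 0;
}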
21 changes: 0 additions & 21 deletions arch/tile/include/asm/scatterlist.h
@@ -1,22 +1 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/

#ifndef _ASM_TILE_SCATTERLIST_H
#define _ASM_TILE_SCATTERLIST_H

#define ISA_DMA_THRESHOLD (~0UL)

#include <asm-generic/scatterlist.h>

#endif /* _ASM_TILE_SCATTERLIST_H */
8 changes: 6 additions & 2 deletions arch/tile/include/asm/setup.h
@@ -15,6 +15,10 @@
#ifndef _ASM_TILE_SETUP_H
#define _ASM_TILE_SETUP_H

#define COMMAND_LINE_SIZE 2048

#ifdef __KERNEL__

#include <linux/pfn.h>
#include <linux/init.h>

@@ -23,10 +27,10 @@
*/
#define MAXMEM_PFN PFN_DOWN(MAXMEM)

#define COMMAND_LINE_SIZE 2048

void early_panic(const char *fmt, ...);
void warn_early_printk(void);
void __init disable_early_printk(void);

#endif /* __KERNEL__ */

#endif /* _ASM_TILE_SETUP_H */
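
Moving COMMAND_LINE_SIZE above the #ifdef __KERNEL__ guard is what "export only COMMAND_LINE_SIZE to userspace" means: the exported <asm/setup.h> now provides just that constant, while the prototypes stay kernel-only. A hedged userspace sketch of the usual way such a constant is used to size a buffer for the boot command line:

/* Userspace sketch (the constant is hard-coded here so the example
 * stands alone; normally it would come from the exported <asm/setup.h>). */
#include <stdio.h>

#define COMMAND_LINE_SIZE 2048

int main(void)
{
	char cmdline[COMMAND_LINE_SIZE];
	FILE *f = fopen("/proc/cmdline", "r");

	if (f && fgets(cmdline, sizeof(cmdline), f))
		printf("boot command line: %s", cmdline);
	if (f)
		fclose(f);
	return 0;
}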
4 changes: 4 additions & 0 deletions arch/tile/include/asm/siginfo.h
@@ -17,6 +17,10 @@

#define __ARCH_SI_TRAPNO

#ifdef __LP64__
# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
#endif

#include <asm-generic/siginfo.h>

/*
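
On LP64 the payload union in the generic siginfo is 8-byte aligned, so the three-int preamble (si_signo, si_errno, si_code) occupies four ints' worth of space; spelling that out as __ARCH_SI_PREAMBLE_SIZE keeps the generic padding calculation consistent for the 64-bit TILE-Gx kernel (one of the COMPAT fixes in the series). A quick check of the arithmetic on an LP64 host, using the generic SI_MAX_SIZE of 128 as an assumed value and a simplified stand-in struct:

/* Sketch: why the preamble is 4 ints on LP64. */
#include <assert.h>
#include <stddef.h>

struct fake_siginfo {
	int si_signo, si_errno, si_code;
	union {
		void *ptr;	/* any 8-byte member forces 8-byte alignment */
		long l;
	} fields;
};

int main(void)
{
	/* The union starts at offset 16, not 12, because of its alignment. */
	assert(offsetof(struct fake_siginfo, fields) == 4 * sizeof(int));
	/* Padding budget: (128 - 16) / 4 == 28 ints, as a 4-int preamble gives. */
	assert((128 - 4 * sizeof(int)) / sizeof(int) == 28);
	return 0;
}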
4 changes: 2 additions & 2 deletions arch/tile/include/asm/uaccess.h
@@ -389,14 +389,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
extern unsigned long __copy_in_user_asm(
extern unsigned long __copy_in_user_inatomic(
void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_sleep();
return __copy_in_user_asm(to, from, n);
return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
8 changes: 4 additions & 4 deletions arch/tile/include/hv/hypervisor.h
@@ -532,11 +532,11 @@ void hv_disable_intr(HV_IntrMask disab_mask);
*/
void hv_clear_intr(HV_IntrMask clear_mask);

/** Assert a set of device interrupts.
/** Raise a set of device interrupts.
*
* @param assert_mask Bitmap of interrupts to clear.
* @param raise_mask Bitmap of interrupts to raise.
*/
void hv_assert_intr(HV_IntrMask assert_mask);
void hv_raise_intr(HV_IntrMask raise_mask);

/** Trigger a one-shot interrupt on some tile
*
@@ -1712,7 +1712,7 @@ typedef struct
* @param cache_control This argument allows you to specify a length of
* physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN).
* You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache.
* You can "or" in HV_FLUSH_EVICT_LI1 to flush the whole LII cache.
* You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache.
* HV_FLUSH_ALL flushes all caches.
* @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of
* tile indices to perform cache flush on. The low bit of the first
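
The documentation fixes pin down two names: the instruction-cache flag is HV_FLUSH_EVICT_L1I, and interrupts are raised with hv_raise_intr() rather than "asserted". Per the corrected comment, cache_control carries a flush length (at most HV_FLUSH_MAX_CACHE_LEN) with evict flags or'ed in; a hedged sketch of assembling such a value follows (the flag names come from the header, the helper itself is hypothetical):

/* Hypothetical helper: build a cache_control value the way the corrected
 * comment describes -- a flush length with evict flags or'ed in. */
#include <hv/hypervisor.h>

static unsigned long make_cache_control(unsigned long flush_len,
					int evict_l2, int evict_l1i)
{
	unsigned long cc = flush_len;	/* bounded by HV_FLUSH_MAX_CACHE_LEN */

	if (evict_l2)
		cc |= HV_FLUSH_EVICT_L2;	/* whole L2 cache */
	if (evict_l1i)
		cc |= HV_FLUSH_EVICT_L1I;	/* whole L1 instruction cache */
	return cc;
}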
