
Merge pull request #112 from robertwb/release

Revert Numpy 1.7 hacks
2 parents ff7a798 + fc2d1d8 commit 4d0bb3bcff1a1ccd685a29a2c5874bf524c9042d, @markflorisson committed Apr 15, 2012
@@ -34,8 +34,6 @@
import Options
from Cython import Utils
from Annotate import AnnotationItem
-from NumpySupport import numpy_transform_attribute_node, \
-    should_apply_numpy_hack
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
@@ -4460,16 +4458,6 @@ def analyse_attribute(self, env, obj_type = None):
# method of an extension type, so we treat it like a Python
# attribute.
pass
-        # NumPy hack
-        if (getattr(self.obj, 'type', None) and obj_type.is_extension_type
-                and should_apply_numpy_hack(obj_type)):
-            replacement_node = numpy_transform_attribute_node(self)
-            # Since we can't actually replace our node yet, we only grasp its
-            # type, and then the replacement happens in
-            # AnalyseExpresssionsTransform...
-            self.type = replacement_node.type
-            if replacement_node is not self:
-                return
        # If we get here, the base object is not a struct/union/extension
        # type, or it is an extension type and the attribute is either not
        # declared or is declared as a Python method. Treat it as a Python
@@ -24,10 +24,6 @@
verbose = 0
-standard_include_path = os.path.abspath(os.path.normpath(
-    os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
-
-
class CompilationData(object):
    # Bundles the information that is passed from transform to transform.
    # (For now, this is only)
@@ -74,6 +70,8 @@ def __init__(self, include_directories, compiler_directives, cpp=False,
        self.pxds = {} # full name -> node tree
+        standard_include_path = os.path.abspath(os.path.normpath(
+            os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
        self.include_directories = include_directories + [standard_include_path]
        self.set_language_level(language_level)
@@ -1,67 +0,0 @@
-# The hacks that are specific for NumPy. These were introduced because
-# the NumPy ABI changed so that the shape, ndim, strides, etc. fields were
-# no longer available, however the use of these were so entrenched in
-# Cython codes
-import os
-from StringEncoding import EncodedString
-
-def should_apply_numpy_hack(obj_type):
-    if not obj_type.is_extension_type or obj_type.objstruct_cname != 'PyArrayObject':
-        return False
-    from Scanning import FileSourceDescriptor
-    from Main import standard_include_path
-    type_source = obj_type.pos[0]
-    if isinstance(type_source, FileSourceDescriptor):
-        type_source_path = os.path.abspath(os.path.normpath(type_source.filename))
-        return type_source_path == os.path.join(standard_include_path, 'numpy.pxd')
-    else:
-        return False
-
-def numpy_transform_attribute_node(node):
-    import PyrexTypes
-    import ExprNodes
-    assert isinstance(node, ExprNodes.AttributeNode)
-
-    if node.obj.type.objstruct_cname != 'PyArrayObject':
-        return node
-
-    pos = node.pos
-    numpy_pxd_scope = node.obj.type.scope.parent_scope
-
-    def macro_call_node(numpy_macro_name):
-        array_node = node.obj
-        func_entry = numpy_pxd_scope.entries[numpy_macro_name]
-        function_name_node = ExprNodes.NameNode(
-            name=EncodedString(numpy_macro_name),
-            pos=pos,
-            entry=func_entry,
-            is_called=1,
-            type=func_entry.type,
-            cf_maybe_null=False,
-            cf_is_null=False)
-
-        call_node = ExprNodes.SimpleCallNode(
-            pos=pos,
-            function=function_name_node,
-            name=EncodedString(numpy_macro_name),
-            args=[array_node],
-            type=func_entry.type.return_type,
-            analysed=True)
-        return call_node
-
-
-    if node.attribute == u'ndim':
-        result = macro_call_node(u'PyArray_NDIM')
-    elif node.attribute == u'data':
-        call_node = macro_call_node(u'PyArray_DATA')
-        cast_node = ExprNodes.TypecastNode(pos,
-            type=PyrexTypes.c_char_ptr_type,
-            operand=call_node)
-        result = cast_node
-    elif node.attribute == u'shape':
-        result = macro_call_node(u'PyArray_DIMS')
-    elif node.attribute == u'strides':
-        result = macro_call_node(u'PyArray_STRIDES')
-    else:
-        result = node
-    return result
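
For orientation, the deleted module above rewrote attribute access on typed ndarray objects into the corresponding PyArray_* accessor calls. A rough Cython sketch of the equivalence it provided (illustrative only, not part of the commit; the describe name is made up):

    cimport numpy as np

    def describe(np.ndarray arr):
        cdef int n
        cdef np.npy_intp *dims
        cdef char *data
        n = arr.ndim        # the hack rewrote this to PyArray_NDIM(arr)
        dims = arr.shape    # ... and this to PyArray_DIMS(arr)
        data = arr.data     # ... and this to <char*>PyArray_DATA(arr)
        return n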
@@ -18,8 +18,7 @@
from Cython.Compiler.StringEncoding import EncodedString
from Cython.Compiler.Errors import error, warning, CompileError, InternalError
from Cython.Compiler.Code import UtilityCode
-from Cython.Compiler.NumpySupport import (should_apply_numpy_hack,
-                                          numpy_transform_attribute_node)
+
import copy
@@ -1748,7 +1747,6 @@ def create_Property(self, entry):
class AnalyseExpressionsTransform(CythonTransform):
-    # Also handles NumPy
    def visit_ModuleNode(self, node):
        self.env_stack = [node.scope]
@@ -1790,21 +1788,10 @@ def visit_IndexNode(self, node):
        elif node.memslice_ellipsis_noop:
            # memoryviewslice[...] expression, drop the IndexNode
            node = node.base
-        return node
-
-    def visit_AttributeNode(self, node):
-        # Note: Expression analysis for attributes has already happened
-        # at this point (by recursive calls starting from FuncDefNode)
-        #print node.dump()
-        #return node
-        type = node.obj.type
-        if (not node.type.is_error and type.is_extension_type and
-                should_apply_numpy_hack(type)):
-            node = numpy_transform_attribute_node(node)
-        self.visitchildren(node)
        return node
+
class FindInvalidUseOfFusedTypes(CythonTransform):
    def visit_FuncDefNode(self, node):
@@ -188,7 +188,6 @@ def create_pipeline(context, mode, exclude_classes=()):
        _check_c_declarations,
        InlineDefNodeCalls(context),
        AnalyseExpressionsTransform(context),
-        # AnalyseExpressionsTransform also contains the NumPy-specific support
        FindInvalidUseOfFusedTypes(context),
        CreateClosureClasses(context),  ## After all lookups and type inference
        ExpandInplaceOperators(context),
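
The removed visit_AttributeNode hook relied on the transform framework's node-replacement convention: whatever a visit_* method returns is spliced into the parent in place of the visited node. A minimal sketch of that convention, assuming only the public CythonTransform base class (this example is not part of the commit):

    from Cython.Compiler.Visitor import CythonTransform

    class ReplaceAttributeSketch(CythonTransform):
        # Hypothetical transform: the node returned from visit_* replaces
        # the visited node in the tree.
        def visit_AttributeNode(self, node):
            self.visitchildren(node)
            return node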
@@ -151,9 +151,6 @@ cdef extern from "numpy/arrayobject.h":
    ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
-    ctypedef struct PyArray_Descr:
-        pass
-
    ctypedef class numpy.dtype [object PyArray_Descr]:
        # Use PyDataType_* macros when possible, however there are no macros
        # for accessing some of the fields, so some are defined. Please
@@ -180,11 +177,15 @@ cdef extern from "numpy/arrayobject.h":
    ctypedef class numpy.ndarray [object PyArrayObject]:
        cdef __cythonbufferdefaults__ = {"mode": "strided"}
-        # Note: The fields are no longer defined, please use accessor
-        # functions. Cython special-cases/hacks the data, ndim, shape
-        # and stride attributes of the ndarray to use accessor
-        # functions for backwards compatability and convenience.
-
+        cdef:
+            # Only taking a few of the most commonly used and stable fields.
+            # One should use PyArray_* macros instead to access the C fields.
+            char *data
+            int ndim "nd"
+            npy_intp *shape "dimensions"
+            npy_intp *strides
+            dtype descr
+            PyObject* base
        # Note: This syntax (function definition in pxd files) is an
        # experimental exception made for __getbuffer__ and __releasebuffer__
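
With the struct fields restored above, user code that declares an ndarray type can read these members directly, although the comment in the diff recommends the PyArray_* macros. A small illustrative sketch (not part of the commit; first_extent is a made-up name and assumes an array of at least one dimension for a meaningful result):

    cimport numpy as np

    def first_extent(np.ndarray arr):
        # Reads the "nd" and "dimensions" fields declared above.
        if arr.ndim == 0:
            return 0
        return arr.shape[0]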
@@ -235,7 +236,7 @@ cdef extern from "numpy/arrayobject.h":
            cdef int t
            cdef char* f = NULL
-            cdef dtype descr = get_array_dtype(self)
+            cdef dtype descr = self.descr
            cdef list stack
            cdef int offset
@@ -375,29 +376,20 @@ cdef extern from "numpy/arrayobject.h":
    bint PyArray_ISWRITEABLE(ndarray m)
    bint PyArray_ISALIGNED(ndarray m)
-    int PyArray_NDIM(ndarray) nogil
+    int PyArray_NDIM(ndarray)
    bint PyArray_ISONESEGMENT(ndarray)
    bint PyArray_ISFORTRAN(ndarray)
    int PyArray_FORTRANIF(ndarray)
-    void* PyArray_DATA(ndarray) nogil
-    char* PyArray_BYTES(ndarray) nogil
-    npy_intp* PyArray_DIMS(ndarray) nogil
-    npy_intp* PyArray_STRIDES(ndarray) nogil
-    npy_intp PyArray_DIM(ndarray, size_t) nogil
-    npy_intp PyArray_STRIDE(ndarray, size_t) nogil
-
-    # The two functions below return borrowed references and should
-    # be used with care; often you will want to use get_array_base
-    # or get_array_dtype (define below) instead from Cython.
-    PyObject* PyArray_BASE(ndarray)
-    # Cython API of the function below might change! PyArray_DESCR
-    # actually returns PyArray_Descr* == pointer-version of dtype,
-    # which appears to be difficult to declare properly in Cython;
-    # protect it with trailing underscore for now just to avoid having
-    # user code depend on it without reading this note.
-    PyArray_Descr * PyArray_DESCR_ "PyArray_DESCR"(ndarray)
+    void* PyArray_DATA(ndarray)
+    char* PyArray_BYTES(ndarray)
+    npy_intp* PyArray_DIMS(ndarray)
+    npy_intp* PyArray_STRIDES(ndarray)
+    npy_intp PyArray_DIM(ndarray, size_t)
+    npy_intp PyArray_STRIDE(ndarray, size_t)
+    # object PyArray_BASE(ndarray) wrong refcount semantics
+    # dtype PyArray_DESCR(ndarray) wrong refcount semantics
    int PyArray_FLAGS(ndarray)
    npy_intp PyArray_ITEMSIZE(ndarray)
    int PyArray_TYPE(ndarray arr)
@@ -969,34 +961,18 @@ cdef extern from "numpy/ufuncobject.h":
    void import_ufunc()
-# The ability to set the base field of an ndarray seems to be
-# deprecated in NumPy 1.7 (no PyArray_SET_BASE seems to be
-# available). Remove this support and see who complains and how their
-# case could be fixed in 1.7...
-#
-#cdef inline void set_array_base(ndarray arr, object base):
-#    cdef PyObject* baseptr
-#    if base is None:
-#        baseptr = NULL
-#    else:
-#        Py_INCREF(base) # important to do this before decref below!
-#        baseptr = <PyObject*>base
-#    Py_XDECREF(arr.base)
-#    arr.base = baseptr
+cdef inline void set_array_base(ndarray arr, object base):
+    cdef PyObject* baseptr
+    if base is None:
+        baseptr = NULL
+    else:
+        Py_INCREF(base) # important to do this before decref below!
+        baseptr = <PyObject*>base
+    Py_XDECREF(arr.base)
+    arr.base = baseptr
cdef inline object get_array_base(ndarray arr):
-    cdef PyObject *pobj = PyArray_BASE(arr)
-    if pobj != NULL:
-        obj = <object>pobj
-        Py_INCREF(obj)
-        return obj
-    else:
+    if arr.base is NULL:
        return None
-
-cdef inline dtype get_array_dtype(ndarray arr):
-    if PyArray_DESCR_(arr) != NULL:
-        obj = <object>PyArray_DESCR_(arr)
-        Py_INCREF(obj)
-        return obj
    else:
-        return None
+        return <object>arr.base
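
The restored set_array_base/get_array_base helpers are typically used when exposing externally owned memory as an ndarray: the base reference keeps the owner alive for the array's lifetime. A hedged sketch of that pattern, assuming the PyArray_SimpleNewFromData and NPY_DOUBLE declarations from this numpy.pxd (the wrap_doubles name and layout are illustrative, not part of the commit):

    cimport numpy as np

    np.import_array()

    cdef np.ndarray wrap_doubles(void *buf, np.npy_intp n, object owner):
        # Create a 1-D view over memory owned by `owner`; no copy is made.
        cdef np.ndarray arr = np.PyArray_SimpleNewFromData(1, &n, np.NPY_DOUBLE, buf)
        # Keep `owner` alive for as long as `arr` exists.
        np.set_array_base(arr, owner)
        return arr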
@@ -1,54 +0,0 @@
-# tag: numpy
-
-
-import numpy as np
-cimport numpy as np
-
-int64_array = np.ones((3, 2), dtype=np.int64)
-
-def f():
- """
- >>> f()
- ndim 2
- data 1
- shape 3 2
- shape[1] 2
- strides 16 8
- """
- cdef np.ndarray x = int64_array
- cdef int i
- cdef Py_ssize_t j, k
- cdef char *p
- # todo: int * p: 23:13: Cannot assign type 'char *' to 'int *'
-
- with nogil:
- i = x.ndim
- print 'ndim', i
-
- with nogil:
- p = x.data
- print 'data', (<np.int64_t*>p)[0]
-
- with nogil:
- j = x.shape[0]
- k = x.shape[1]
- print 'shape', j, k
- # Check that non-typical uses still work
- cdef np.npy_intp *shape
- with nogil:
- shape = x.shape + 1
- print 'shape[1]', shape[0]
-
- with nogil:
- j = x.strides[0]
- k = x.strides[1]
- print 'strides', j, k
-
-def test_non_namenode_attribute_access(obj):
- """
- >>> test_non_namenode_attribute_access(int64_array)
- data 1
- """
- # Try casting, resulting in an AttributeNode with a TypeCastNode as object
- # and 'data' as attribute
- print "data", (<np.int64_t *> (<np.ndarray> obj).data)[0]
