From cd4e91422009cc7ffce111421ebe0e28055b8a5c Mon Sep 17 00:00:00 2001
From: Stefan Foulis
Date: Thu, 23 Apr 2009 17:05:32 +0200
Subject: [PATCH] initial import

---
 image_filer/__init__.py                         |    0
 image_filer/admin.py                            |   33 +
 image_filer/fields.py                           |   50 +
 image_filer/filters.py                          |  146 ++
 image_filer/models.py                           |  768 +++++++
 .../admin/image_filer/image/change_form.html    |   25 +
 image_filer/templates/image_filer/base.html     |   50 +
 .../image_filer/directory_listing.html          |   93 +
 .../templates/image_filer/include/bucket.html   |   10 +
 .../image_filer/include/export_dialog.html      |   39 +
 .../image_filer/include/folderlisting.html      |   18 +
 .../include/new_folder_dialog.html              |   40 +
 .../image_filer/include/new_folder_form.html    |    3 +
 .../image_filer/include/upload_dialog.html      |  245 +++
 image_filer/urls.py                             |   15 +
 image_filer/utils/EXIF.py                       | 1767 +++++++++++++++++
 image_filer/utils/__init__.py                   |    0
 image_filer/utils/files.py                      |   21 +
 image_filer/utils/zip.py                        |   27 +
 image_filer/utils/zipfile.py                    | 1418 +++++++++++++
 image_filer/views.py                            |  215 ++
 21 files changed, 4983 insertions(+)
 create mode 100644 image_filer/__init__.py
 create mode 100644 image_filer/admin.py
 create mode 100644 image_filer/fields.py
 create mode 100644 image_filer/filters.py
 create mode 100644 image_filer/models.py
 create mode 100644 image_filer/templates/admin/image_filer/image/change_form.html
 create mode 100644 image_filer/templates/image_filer/base.html
 create mode 100644 image_filer/templates/image_filer/directory_listing.html
 create mode 100644 image_filer/templates/image_filer/include/bucket.html
 create mode 100644 image_filer/templates/image_filer/include/export_dialog.html
 create mode 100644 image_filer/templates/image_filer/include/folderlisting.html
 create mode 100644 image_filer/templates/image_filer/include/new_folder_dialog.html
 create mode 100644 image_filer/templates/image_filer/include/new_folder_form.html
 create mode 100644 image_filer/templates/image_filer/include/upload_dialog.html
 create mode 100644 image_filer/urls.py
 create mode 100644 image_filer/utils/EXIF.py
 create mode 100644 image_filer/utils/__init__.py
 create mode 100644 image_filer/utils/files.py
 create mode 100644 image_filer/utils/zip.py
 create mode 100644 image_filer/utils/zipfile.py
 create mode 100644 image_filer/views.py

diff --git a/image_filer/__init__.py b/image_filer/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/image_filer/admin.py b/image_filer/admin.py
new file mode 100644
index 0000000..96ca374
--- /dev/null
+++ b/image_filer/admin.py
@@ -0,0 +1,33 @@
+from django.contrib import admin
+from models import Folder, Image, FolderPermission, ImagePermission
+from models import ImageManipulationProfile, ImageManipulationStep, ImageManipulationTemplate
+from models import Bucket, BucketItem
+
+admin.site.register([FolderPermission, ImagePermission, ImageManipulationTemplate])
+
+
+class ImageAdmin(admin.ModelAdmin):
+    list_display = ('label', 'admin_thumbnail', 'has_all_mandatory_data')
+    list_per_page = 10
+    search_fields = ['name', 'original_filename', 'default_alt_text', 'default_caption', 'usage_restriction_notes', 'notes']
+admin.site.register(Image, ImageAdmin)
+
+class FolderAdmin(admin.ModelAdmin):
+    list_display = ('name', 'owner',)
+    list_per_page = 20
+admin.site.register(Folder, FolderAdmin)
+
+class ImageManipulationStepInline(admin.TabularInline):
+    model = ImageManipulationStep
+class ImageManipulationProfileAdmin(admin.ModelAdmin):
+    inlines = [ ImageManipulationStepInline, ]
+admin.site.register(ImageManipulationProfile, ImageManipulationProfileAdmin)
+
+
+class BucketItemInline(admin.TabularInline):
+    model = BucketItem
+class BucketAdmin(admin.ModelAdmin):
+    model = Bucket
+    inlines = [ BucketItemInline, ]
+admin.site.register(Bucket, BucketAdmin)
\ No newline at end of file
diff --git a/image_filer/fields.py b/image_filer/fields.py
new file mode 100644
index 0000000..ab7ebc0
--- /dev/null
+++ b/image_filer/fields.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------- fields.py --------------------------------------- #
+# http://www.djangosnippets.org/snippets/513/
+# 2009-04-05 Stefan Foulis: added base64 encoding to avoid problems with unicode strings
+
+from django.db import models
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+class PickledObject(str):
+    """A subclass of string so it can be told whether a string is
+    a pickled object or not (if the object is an instance of this class
+    then it must [well, should] be a pickled one)."""
+    pass
+
+class PickledObjectField(models.Field):
+    __metaclass__ = models.SubfieldBase
+
+    def to_python(self, value):
+        if isinstance(value, PickledObject):
+            # If the value is a definite pickle, an error raised while
+            # de-pickling should be allowed to propagate.
+            return pickle.loads( str(value).decode('base64') )
+        else:
+            try:
+                return pickle.loads( str(value).decode('base64') )
+            except:
+                # If an error was raised, just return the plain value
+                return value
+
+    def get_db_prep_save(self, value):
+        if value is not None and not isinstance(value, PickledObject):
+            value = PickledObject( pickle.dumps(value).encode('base64') )
+        return value
+
+    def get_internal_type(self):
+        return 'TextField'
+
+    def get_db_prep_lookup(self, lookup_type, value):
+        if lookup_type == 'exact':
+            value = self.get_db_prep_save(value)
+            return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value)
+        elif lookup_type == 'in':
+            value = [self.get_db_prep_save(v) for v in value]
+            return super(PickledObjectField, self).get_db_prep_lookup(lookup_type, value)
+        else:
+            raise TypeError('Lookup type %s is not supported.'
% lookup_type) diff --git a/image_filer/filters.py b/image_filer/filters.py new file mode 100644 index 0000000..2d4060d --- /dev/null +++ b/image_filer/filters.py @@ -0,0 +1,146 @@ +from inspect import isclass +try: + import Image + import ImageColor + import ImageFile + import ImageFilter + import ImageEnhance +except ImportError: + try: + from PIL import Image + from PIL import ImageColor + from PIL import ImageFile + from PIL import ImageFilter + from PIL import ImageEnhance + except ImportError: + raise ImportError("The Python Imaging Library was not found.") + +filters = [] + +class BaseFilter(object): + pass + +class ResizeFilter(BaseFilter): + name = "Resize to specified dimensions" + identifier = "resize_simple" + def render(self, im, size_x=128, size_y=64, crop=True, crop_from='top', upscale=True): + cur_width, cur_height = im.size + new_width, new_height = (size_x, size_y) + if crop: + ratio = max(float(new_width)/cur_width,float(new_height)/cur_height) + x = (cur_width * ratio) + y = (cur_height * ratio) + xd = abs(new_width - x) + yd = abs(new_height - y) + x_diff = int(xd / 2) + y_diff = int(yd / 2) + if crop_from == 'top': + box = (int(x_diff), 0, int(x_diff+new_width), new_height) + elif crop_from == 'left': + box = (0, int(y_diff), new_width, int(y_diff+new_height)) + elif crop_from == 'bottom': + box = (int(x_diff), int(yd), int(x_diff+new_width), int(y)) # y - yd = new_height + elif crop_from == 'right': + box = (int(xd), int(y_diff), int(x), int(y_diff+new_height)) # x - xd = new_width + else: + box = (int(x_diff), int(y_diff), int(x_diff+new_width), int(y_diff+new_height)) + im = im.resize((int(x), int(y)), Image.ANTIALIAS).crop(box) + else: + if not new_width == 0 and not new_height == 0: + ratio = min(float(new_width)/cur_width, + float(new_height)/cur_height) + else: + if new_width == 0: + ratio = float(new_height)/cur_height + else: + ratio = float(new_width)/cur_width + new_dimensions = (int(round(cur_width*ratio)), + int(round(cur_height*ratio))) + if new_dimensions[0] > cur_width or \ + new_dimensions[1] > cur_height: + if not upscale: + return im + im = im.resize(new_dimensions, Image.ANTIALIAS) + return im +filters.append(ResizeFilter) +class TinyResizeFilterHack(ResizeFilter): + name = "Tiny Resize Filter Hack" + def render(self, im, size_x=24, size_y=24, crop=True, crop_from='top', upscale=True): + return super(TinyResizeFilterHack, self).render(im, size_x=size_x, size_y=size_y) +filters.append(TinyResizeFilterHack) + +class ReflectionFilter(BaseFilter): + name = "Sexy Web 2.0 reflection filter" + identifier = "reflection" + def render(self, im, bgcolor="#FFFFFF", amount=0.4, opacity=0.6): + """ Returns the supplied PIL Image (im) with a reflection effect + + bgcolor The background color of the reflection gradient + amount The height of the reflection as a percentage of the orignal image + opacity The initial opacity of the reflection gradient + + Originally written for the Photologue image management system for Django + and Based on the original concept by Bernd Schlapsi + + """ + # convert bgcolor string to rgb value + background_color = ImageColor.getrgb(bgcolor) + + # copy orignial image and flip the orientation + reflection = im.copy().transpose(Image.FLIP_TOP_BOTTOM) + + # create a new image filled with the bgcolor the same size + background = Image.new("RGB", im.size, background_color) + + # calculate our alpha mask + start = int(255 - (255 * opacity)) # The start of our gradient + steps = int(255 * amount) # the number of intermedite values + 
increment = (255 - start) / float(steps) + mask = Image.new('L', (1, 255)) + for y in range(255): + if y < steps: + val = int(y * increment + start) + else: + val = 255 + mask.putpixel((0, y), val) + alpha_mask = mask.resize(im.size) + + # merge the reflection onto our background color using the alpha mask + reflection = Image.composite(background, reflection, alpha_mask) + + # crop the reflection + reflection_height = int(im.size[1] * amount) + reflection = reflection.crop((0, 0, im.size[0], reflection_height)) + + # create new image sized to hold both the original image and the reflection + composite = Image.new("RGB", (im.size[0], im.size[1]+reflection_height), background_color) + + # paste the orignal image and the reflection into the composite image + composite.paste(im, (0, 0)) + composite.paste(reflection, (0, im.size[1])) + + # return the image complete with reflection effect + return composite +filters.append(ReflectionFilter) + +""" +Create image filter objects for all the built in PIL filters +""" +for n in dir(ImageFilter): + klass = getattr(ImageFilter, n) + if isclass(klass) and issubclass(klass, ImageFilter.BuiltinFilter) and \ + hasattr(klass, 'name'): + class NewSubclass(BaseFilter): + _pil_filter = klass + name = klass.name + identifier = klass.name + def render(self, im): + return im.filter(self._pil_filter) + NewSubclass.__name__ = "%s%s" % (klass.name, "Filter") + filters.append(NewSubclass) + + + +filters_by_identifier = {} +for filter in filters: + filters_by_identifier[filter.identifier] = filter \ No newline at end of file diff --git a/image_filer/models.py b/image_filer/models.py new file mode 100644 index 0000000..b75e7f0 --- /dev/null +++ b/image_filer/models.py @@ -0,0 +1,768 @@ +import os +import mptt +from django.db import models +from django.db.models import Q +from django.contrib.contenttypes.models import ContentType +from django.contrib.contenttypes import generic +from django.core.files.storage import FileSystemStorage +from django.utils.translation import ugettext_lazy as _ +from datetime import datetime, date +from utils import EXIF +from fields import PickledObjectField +from django.db.models.signals import post_init +from django.utils.functional import curry + +from django.contrib.auth import models as auth_models + +from django.conf import settings + +try: + import uuid +except ImportError: + from django_extensions.utils import uuid + +class UUIDFileSystemStorage(FileSystemStorage): + def get_available_name(self, name): + newuuid = uuid.uuid4() + file_extension = name.split('.')[-1].lower() + r = '%s.%s' % (newuuid, file_extension) + return r +CATALOGUE_BASE_URL = "".join([settings.MEDIA_URL, 'catalogue/']) +CATALOGUE_BASE_PATH = os.path.abspath(os.path.join(settings.MEDIA_ROOT, 'catalogue/')) +print settings.MEDIA_URL +uuid_file_system_storage = UUIDFileSystemStorage( + location=CATALOGUE_BASE_PATH, + base_url=CATALOGUE_BASE_URL + ) + +class AbstractFile(models.Model): + """ + Represents a "File-ish" thing that is in a Folder. 
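+    Folder membership is duck-typed via the "_files" related-name convention
+    (see Folder.files below); there is no enforced common base class.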
Any subclasses must + at least define a foreign key to folder and a file field (or subclass thereof): + folder = models.ForeignKey(Folder, related_name='mytype_files') + file = models.FileField(upload_to='catalogue', storage=uuid_file_system_storage) + Additional attributes may be added to enhance the experience: + get_absolute_url(): link to the object in the front-end + get_absolute_admin_url(): link to the object in the admin interface + get_default_thumbnail_url(): the thumbnail to show in default listings + get_admin_thumbnail_url(): the thumbnail for admin listings + file_type: + """ + file_type = 'unknown' + original_filename = models.CharField(editable=False, max_length=255, blank=True, null=True) + name = models.CharField(max_length=255, null=True, blank=True) + + owner = models.ForeignKey(auth_models.User, related_name='owned_files', null=True, blank=True) + + uploaded_at = models.DateTimeField(auto_now_add=True) + modified_at = models.DateTimeField(auto_now=True) + + def __unicode__(self): + if self.name in ('', None): + text = u"%s" % (self.original_filename,) + else: + text = u"%s" % (self.name,) + return text + + class Meta: + abstract=True + + +class FolderRoot(object): + name = 'Root' + + def _children(self): + return Folder.objects.filter(parent__isnull=True) + children = property(_children) + +class Folder(models.Model): + """ + Represents a Folder that things (files) can be put into. Folders are *NOT* + mirrored in the Filesystem and can have any unicode chars as their name. + Other models may attach to a folder with a ForeignKey. If the related name + ends with "_files" they will automatically be listed in the + folder.files list along with all the other models that link to the folder + in this way. Make sure the linked models obey the AbstractFile interface + (Duck Type). + """ + file_type = 'Folder' + + parent = models.ForeignKey('self', null=True, blank=True, related_name='children') + name = models.CharField(max_length=255) + + owner = models.ForeignKey(auth_models.User, related_name='owned_folders', null=True, blank=True) + + uploaded_at = models.DateTimeField(auto_now_add=True) + + created_at = models.DateTimeField(auto_now_add=True) + modified_at = models.DateTimeField(auto_now=True) + + @property + def files(self): + # TODO: make this a "multi iterator" that can iterate over multiple + # querysets without having to load all objects + rel = [] + for attr in dir(self): + if not attr.startswith('_') and attr.endswith('_files'): + # TODO: also check for fieldtype + rel.append(attr) + result = [] + for r in rel: + files = getattr(self,r) + for file in files.all(): + result.append(file) + return result + + def has_edit_permission(self, request): + return self.has_generic_permission(request, 'edit') + def has_read_permission(self, request): + return self.has_generic_permission(request, 'read') + def has_add_children_permission(self, request): + return self.has_generic_permission(request, 'add_children') + def has_generic_permission(self, request, type): + """ + Return true if the current user has permission on this + folder. Return the string 'ALL' if the user has all rights. 
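+        (The 'All' marker is handled internally by FolderPermission.objects.get_<type>_id_list();
+        this method itself returns True or False and caches the answer on the instance.)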
+ """ + user = request.user + if not user.is_authenticated() or not user.is_staff: + return False + elif user.is_superuser: + return True + elif user == self.owner: + return True + else: + att_name = "permission_%s_cache" % type + if not hasattr(self, "permission_user_cache") or \ + not hasattr(self, att_name) or \ + request.user.pk != self.permission_user_cache.pk: + func = getattr(FolderPermission.objects, "get_%s_id_list" % type) + permission = func(user) + self.permission_user_cache = request.user + if permission == "All" or self.id in permission: + setattr(self, att_name, True) + self.permission_edit_cache = True + else: + setattr(self, att_name, False) + return getattr(self, att_name) + + def __unicode__(self): + return u"<%s: '%s'>" % (self.__class__.__name__, self.name) + class Meta: + unique_together = (('parent','name'),) + +class Image(AbstractFile): + file_type = 'image' + file = models.ImageField(upload_to='catalogue', storage=uuid_file_system_storage, height_field='_file_height', width_field='_file_witdh', null=True, blank=True) + _height_field = models.IntegerField(null=True, blank=True) + _width_field = models.IntegerField(null=True, blank=True) + + date_taken = models.DateTimeField(_('date taken'), null=True, blank=True, editable=False) + + manipulation_profile = models.ForeignKey('ImageManipulationProfile', related_name="images", null=True, blank=True) + + parent = models.ForeignKey('self', null=True, blank=True, related_name='children') + folder = models.ForeignKey(Folder, related_name='image_files', null=True, blank=True) + + contact = models.ForeignKey(auth_models.User, related_name='contact_of_files', null=True, blank=True) + + default_alt_text = models.CharField(max_length=255, blank=True, null=True) + default_caption = models.CharField(max_length=255, blank=True, null=True) + + author = models.CharField(max_length=255, null=True, blank=True) + + must_always_publish_author_credit = models.BooleanField(default=False) + must_always_publish_copyright = models.BooleanField(default=False) + + # TODO: Factor out customer specific fields... maybe a m2m? + can_use_for_web = models.BooleanField(default=True) + can_use_for_print = models.BooleanField(default=True) + can_use_for_teaching = models.BooleanField(default=True) + can_use_for_research = models.BooleanField(default=True) + can_use_for_private_use = models.BooleanField(default=True) + + usage_restriction_notes = models.TextField(null=True, blank=True) + notes = models.TextField(null=True, blank=True) + + has_all_mandatory_data = models.BooleanField(default=False, editable=False) + + def render(self): + if not self.parent: + # if this is a root image rendering is forbidden... the original + # may not be changed + return False + if not self.file and self.parent: + # this is a child image that has no file set yet... 
generate an id + file_ext = os.path.splitext(self.parent.file.name)[1] + new_uuid = uuid.uuid4() + self.file = "gen-%s.%s" % (new_uuid, file_ext) + im = filters.Image.open(self.parent.file.path) + # Save the original format + im_format = im.format + if self.manipulation_profile: + im = self.manipulation_profile.render(im) + im_filename = '%s' % (self.file.path) + try: + if im_format != 'JPEG': + try: + im.save(im_filename) + return im + except KeyError: + pass + im.save(im_filename, 'JPEG') + except IOError, e: + if os.path.isfile(im_filename): + os.unlink(im_filename) + raise e + + def admin_thumbnail(self): + func = getattr(self, 'get_admin_thumbnail_url', None) + if func is None: + return _('An "admin_thumbnail" photo size has not been defined.') + else: + if hasattr(self, 'get_absolute_url'): + return u'' % \ + (self.get_absolute_url(), func()) + else: + return u'' % \ + (self.image.url, func()) + admin_thumbnail.short_description = _('Thumbnail') + admin_thumbnail.allow_tags = True + + def cache_path(self): + if self.file: + return os.path.join(os.path.dirname(self.file.path),"cache") + cache_path = property(cache_path) + def cache_url(self): + if self.file: + return '/'.join([os.path.dirname(self.file.url), "cache"]) + cache_url = property(cache_url) + + def _image_filename(self): + if self.file: + return os.path.basename(self.file.path) + image_filename = property(_image_filename) + + def _get_filename_for_template(self, template): + """ + template: either a ImageManipulationTemplate instance or just a simple + string representing the identifier of one. + """ + template = getattr(template, 'identifier', template) + base, ext = os.path.splitext(self.image_filename) + return ''.join([base, '_', template, ext]) + + def _get_TEMPLATE_template(self, template): + return ImageManipulationTemplateCache().templates.get(template) + + def _get_TEMPLATE_size(self, template): + template = ImageManipulationTemplateCache().templates.get(template) + if not self.template_file_exists(template): + self.create_template_file(template) + return filters.Image.open(self._get_TEMPLATE_filename(template)).size + + def _get_TEMPLATE_url(self, template): + template = ImageManipulationTemplateCache().templates.get(template) + if not self.cached_template_file_exists(template): + print "generating cache image" + self.create_template_file_cache(template) + return '/'.join( [self.cache_url, self._get_filename_for_template(template)] ) + + def _get_TEMPLATE_filename(self, template): + template = ImageManipulationTemplateCache().templates.get(template) + return os.path.join( self.cache_path, self._get_filename_for_template(template) ) + + def add_accessor_methods(self, *args, **kwargs): + for template in ImageManipulationTemplateCache().templates.keys(): + setattr(self, 'get_%s_template' % template, + curry(self._get_TEMPLATE_template, template=template)) + setattr(self, 'get_%s_size' % template, + curry(self._get_TEMPLATE_size, template=template)) + setattr(self, 'get_%s_url' % template, + curry(self._get_TEMPLATE_url, template=template)) + setattr(self, 'get_%s_filename' % template, + curry(self._get_TEMPLATE_filename, template=template)) + + def cached_template_file_exists(self, template): + """ + checks if the image for this template exists + """ + func = getattr(self, "get_%s_filename" % template.identifier, None) + if func is not None: + if os.path.isfile(func()): + return True + return False + + def create_template_file_cache(self, template): + """ + creates the image for this template in the cache + """ + if 
self.cached_template_file_exists(template): + return + if not os.path.isdir(self.cache_path): + os.makedirs(self.cache_path) + try: + im = filters.Image.open(self.file.path) + except IOError: + return + # Save the original format + im_format = im.format + #print im_format + # Apply the filters + im = template.render(im) + im_filename = getattr(self, "get_%s_filename" % template.identifier)() + try: + if im_format != 'JPEG': + try: + im.save(im_filename) + return + except KeyError: + pass + im.save(im_filename, 'JPEG') + except IOError, e: + print "error: ", e + if os.path.isfile(im_filename): + os.unlink(im_filename) + raise e + def remove_cache_template_file(self, template, remove_dirs=True): + if not self.cached_template_file_exists(template): + return + filename = getattr(self, "get_%s_filename" % template.identifier)() + if os.path.isfile(filename): + os.remove(filename) + if remove_dirs: + self.remove_cache_dirs() + def clear_cache(self): + cache = ImageManipulationTemplateCache() + for template in cache.templates.values(): + self.remove_cache_template_file(template, remove_dirs=False) + self.remove_cache_dirs() + + def pre_cache(self): + cache = ImageManipulationTemplateCache() + for template in cache.templates.values(): + if template.pre_cache: + self.create_template_file_cache(template) + def remove_cache_dirs(self): + try: + os.removedirs(self.cache_path) + except: + pass + def get_absolute_url(self): + # TODO: fix url do be more robust + return '%s%s' % (CATALOGUE_BASE_URL,self.file.name) + + def _check_validity(self): + if not self.name or not self.contact: + return False + return True + + def save(self, *args, **kwargs): + if self.date_taken is None: + try: + exif_date = self.EXIF.get('EXIF DateTimeOriginal',None) + if exif_date is not None: + d, t = str.split(exif_date.values) + year, month, day = d.split(':') + hour, minute, second = t.split(':') + self.date_taken = datetime(int(year), int(month), int(day), + int(hour), int(minute), int(second)) + except: + pass + if self.date_taken is None: + self.date_taken = datetime.now() + self.render() + if self._get_pk_val(): + self.clear_cache() + if not self.contact: + self.contact = self.owner + self.has_all_mandatory_data = self._check_validity() + super(Image, self).save(*args, **kwargs) + self.pre_cache() + def delete(self): + assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname) + self.clear_cache() + super(Image, self).delete() + #check that all mandatory data is set and save the result to has_all_mandatory_data + def has_edit_permission(self, request): + return self.has_generic_permission(request, 'edit') + def has_read_permission(self, request): + return self.has_generic_permission(request, 'read') + def has_add_children_permission(self, request): + return self.has_generic_permission(request, 'add_children') + def has_generic_permission(self, request, type): + """ + Return true if the current user has permission on this + image. Return the string 'ALL' if the user has all rights. 
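+        (Same check as Folder.has_generic_permission above, backed by ImagePermission.objects.)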
+ """ + user = request.user + if not user.is_authenticated() or not user.is_staff: + return False + elif user.is_superuser: + return True + elif user == self.owner: + return True + else: + att_name = "permission_%s_cache" % type + if not hasattr(self, "permission_user_cache") or \ + not hasattr(self, att_name) or \ + request.user.pk != self.permission_user_cache.pk: + func = getattr(ImagePermission.objects, "get_%s_id_list" % type) + permission = func(user) + self.permission_user_cache = request.user + if permission == "All" or self.id in permission: + setattr(self, att_name, True) + self.permission_edit_cache = True + else: + setattr(self, att_name, False) + return getattr(self, att_name) + def label(self): + if self.name in ['',None]: + return self.original_filename or 'unnamed file' + else: + return self.name + label = property(label) + def __unicode__(self): + return self.label +# MPTT registration +mptt_models = [Folder, Image] +for mptt_model in mptt_models: + try: + mptt.register(mptt_model) + except mptt.AlreadyRegistered: + pass + + +class FolderPermissionManager(models.Manager): + def get_read_id_list(self, user): + """ + Give a list of a Folders where the user has read rights or the string + "All" if the user has all rights. + """ + return self.__get_id_list(user, "can_read") + def get_edit_id_list(self, user): + return self.__get_id_list(user, "can_edit") + def get_add_children_id_list(self, user): + return self.__get_id_list(user, "can_add_children") + def __get_id_list(self, user, attr): + if user.is_superuser: + return 'All' + allow_list = [] + deny_list = [] + group_ids = user.groups.all().values_list('id', flat=True) + q = Q(user=user)|Q(group__in=group_ids)|Q(everybody=True) + perms = self.filter(q).order_by('folder__tree_id', 'folder__level', + 'folder__lft') + for perm in perms: + if perm.type == FolderPermission.ALL: + if getattr(perm, attr): + allow_list = list(Folder.objects.all().values_list('id', flat=True)) + else: + return [] + if getattr(perm, attr): + if perm.folder.id not in allow_list: + allow_list.append(perm.folder.id) + if perm.folder.id in deny_list: + deny_list.remove(perm.folder.id) + else: + if perm.folder.id not in deny_list: + deny_list.append(perm.folder.id) + if perm.folder.id in allow_list: + allow_list.remove(perm.folder.id) + if perm.type == FolderPermission.CHILDREN: + for id in perm.folder.get_descendants().values_list('id', flat=True): + if getattr(perm, attr): + if id not in allow_list: + allow_list.append(id) + if id in deny_list: + deny_list.remove(id) + else: + if id not in deny_list: + deny_list.append(id) + if id in allow_list: + allow_list.remove(id) + return allow_list + +class FolderPermission(models.Model): + ALL = 0 + THIS = 1 + CHILDREN = 2 + + TYPES = ( + (ALL, _('all items') ), + (THIS, _('this item only') ), + (CHILDREN, _('this item and all children') ), + ) + ''' + content_type = models.ForeignKey(ContentType, null=True, blank=True) + object_id = models.PositiveIntegerField() + content_object = generic.GenericForeignKey('content_type', 'object_id') + ''' + folder = models.ForeignKey(Folder, null=True, blank=True) + + type = models.SmallIntegerField(_('type'), choices=TYPES, default=0) + user = models.ForeignKey(auth_models.User, verbose_name=_("user"), blank=True, null=True) + group = models.ForeignKey(auth_models.Group, verbose_name=_("group"), blank=True, null=True) + everybody = models.BooleanField(_("everybody"), default=False) + + can_edit = models.BooleanField(_("can edit"), default=True) + can_read = 
models.BooleanField(_("can read"), default=False) + can_add_children = models.BooleanField(_("can add children"), default=True) + + objects = FolderPermissionManager() + + def __unicode__(self): + if self.folder: + name = u'%s' % self.folder + else: + name = u'All Folders' + + ug = [] + if self.everybody: + user = 'Everybody' + else: + if self.group: + ug.append(u"Group: %s" % self.group) + if self.user: + ug.append(u"User: %s" % self.user) + usergroup = " ".join(ug) + + return u"%s (%s)" % (usergroup, unicode(self.TYPES[self.type][1])) + class Meta: + verbose_name = _('Folder Permission') + verbose_name_plural = _('Folder Permissions') + + + +class ImagePermissionManager(models.Manager): + def get_read_id_list(self, user): + """ + Give a list of a Images where the user has read rights or the string + "All" if the user has all rights. + """ + return self.__get_id_list(user, "can_read") + def get_edit_id_list(self, user): + return self.__get_id_list(user, "can_edit") + def get_add_children_id_list(self, user): + return self.__get_id_list(user, "can_add_children") + def __get_id_list(self, user, attr): + if user.is_superuser: + return 'All' + allow_list = [] + deny_list = [] + group_ids = user.groups.all().values_list('id', flat=True) + q = Q(user=user)|Q(group__in=group_ids)|Q(everybody=True) + perms = self.filter(q).order_by('image__tree_id', 'image__level', + 'image__lft') + for perm in perms: + if perm.type == ImagePermission.ALL: + if getattr(perm, attr): + allow_list = list(Image.objects.all().values_list('id', flat=True)) + else: + return [] + if getattr(perm, attr): + if perm.image.id not in allow_list: + allow_list.append(perm.image.id) + if perm.image.id in deny_list: + deny_list.remove(perm.image.id) + else: + if perm.image.id not in deny_list: + deny_list.append(perm.image.id) + if perm.image.id in allow_list: + allow_list.remove(perm.image.id) + if perm.type == ImagePermission.CHILDREN: + for id in perm.image.get_descendants().values_list('id', flat=True): + if getattr(perm, attr): + if id not in allow_list: + allow_list.append(id) + if id in deny_list: + deny_list.remove(id) + else: + if id not in deny_list: + deny_list.append(id) + if id in allow_list: + allow_list.remove(id) + return allow_list + +class ImagePermission(models.Model): + ALL = 0 + THIS = 1 + CHILDREN = 2 + + TYPES = ( + (ALL, _('all items') ), + (THIS, _('this item only') ), + (CHILDREN, _('this item and all children') ), + ) + ''' + content_type = models.ForeignKey(ContentType, null=True, blank=True) + object_id = models.PositiveIntegerField() + content_object = generic.GenericForeignKey('content_type', 'object_id') + ''' + image = models.ForeignKey(Image, null=True, blank=True) + + type = models.SmallIntegerField(_('type'), choices=TYPES, default=0) + user = models.ForeignKey(auth_models.User, verbose_name=_("user"), blank=True, null=True) + group = models.ForeignKey(auth_models.Group, verbose_name=_("group"), blank=True, null=True) + everybody = models.BooleanField(_("everybody"), default=False) + + can_edit = models.BooleanField(_("can edit"), default=True) + can_read = models.BooleanField(_("can read"), default=False) + can_add_children = models.BooleanField(_("can add children"), default=True) + + objects = ImagePermissionManager() + + def __unicode__(self): + return u"%s: %s" % (self.user or self.group, unicode(self.TYPES[self.type][1])) + class Meta: + verbose_name = _('Image Permission') + verbose_name_plural = _('Image Permissions') + + + +import filters +FILTER_CHOICES = [] +for filter in 
filters.filters: + FILTER_CHOICES.append( (filter.identifier, filter.name) ) +#print filters.filters +#print FILTER_CHOICES + +class ImageManipulationProfile(models.Model): + name = models.CharField(max_length=255, null=True, blank=True) + description = models.TextField(null=True, blank=True) + + show_in_library = models.BooleanField(default=False) + + def render_to_file(self, image): + #prepare directories + cache_path = image.cache_path + if not os.path.isdir(cache_path): + os.makedirs(cache_path) + im = filters.Image.open(image.file.path) + # Save the original format + im_format = im.format + im = self.render(im) + im_filename = '%s%s' % (cache_path, image.file.name) + #print image.file.path + #print cache_path + #print im_filename + try: + if im_format != 'JPEG': + try: + im.save(im_filename) + return im + except KeyError: + pass + im.save(im_filename, 'JPEG') + except IOError, e: + if os.path.isfile(im_filename): + os.unlink(im_filename) + raise e + def render(self, im): + for step in self.steps.order_by('order'): + im = step.render(im) + return im + def __unicode__(self): + return self.name + + +class ImageManipulationStep(models.Model): + template = models.ForeignKey(ImageManipulationProfile, related_name='steps') + filter_identifier = models.CharField(max_length=255, choices=FILTER_CHOICES) + name = models.CharField(max_length=255, null=True, blank=True) + description = models.TextField(null=True, blank=True) + data = PickledObjectField(default={}) + order = models.IntegerField(default=0) + + def render(self, im): + FilterClass = filters.filters_by_identifier[self.filter_identifier] + filter_class_instance = FilterClass() + return filter_class_instance.render(im) + + class Meta: + ordering = ("order",) + unique_together = (("template","order"),) + +class ImageManipulationTemplate(models.Model): + identifier = models.CharField(max_length=255, unique=True) + name = models.CharField(max_length=255, null=True, blank=True) + description = models.TextField(null=True, blank=True) + + profile = models.ForeignKey(ImageManipulationProfile, related_name='templates') + + pre_cache = models.BooleanField(_('pre-cache?'), default=False, help_text=_('If selected this photo size will be pre-cached as photos are added.')) + + def render(self, im): + return self.profile.render(im) + + def clear_cache(self): + for cls in Image.__subclasses__(): + for obj in cls.objects.all(): + obj.remove_cache_template_file(self) + if self.pre_cache: + obj.create_template_file_cache(self) + ImageManipulationTemplateCache().reset() + def save(self, *args, **kwargs): + super(ImageManipulationTemplate, self).save(*args, **kwargs) + ImageManipulationTemplateCache().reset() + self.clear_cache() + + def delete(self): + assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." 
% (self._meta.object_name, self._meta.pk.attname) + self.clear_cache() + super(ImageManipulationTemplate, self).delete() + def __unicode__(self): + return u"%s (%s)" % (self.name, self.identifier) +class ImageManipulationTemplateCache(object): + __state = {"templates": {}} + def __init__(self): + self.__dict__ = self.__state + if not len(self.templates): + templates = ImageManipulationTemplate.objects.all() + for template in templates: + self.templates[template.identifier] = template + + def reset(self): + self.templates = {} +# Set up the accessor methods +def add_methods(sender, instance, signal, *args, **kwargs): + """ Adds methods to access sized images (urls, paths) + + after the Photo model's __init__ function completes, + this method calls "add_accessor_methods" on each instance. + """ + if hasattr(instance, 'add_accessor_methods'): + instance.add_accessor_methods() + +# connect the add_accessor_methods function to the post_init signal +post_init.connect(add_methods) + + +class Bucket(models.Model): + user = models.ForeignKey(auth_models.User, related_name="buckets") + files = models.ManyToManyField(Image, related_name="buckets", through='BucketItem') + + def append_file(self, file): + newitem = BucketItem(file=file, bucket=self) + newitem.save() + + def empty(self): + for item in self.bucket_items.all(): + item.delete() + empty.alters_data = True + + def create_zip(self): + return 'zipfile' + def clone(self, to_folder=None): + pass + def set_image_manipulation_profile(self): + pass + + def __unicode__(self): + return u"Bucket %s of %s" % (self.id, self.user) + +class BucketItem(models.Model): + file = models.ForeignKey(Image) + bucket = models.ForeignKey(Bucket) + is_checked = models.BooleanField(default=True) + \ No newline at end of file diff --git a/image_filer/templates/admin/image_filer/image/change_form.html b/image_filer/templates/admin/image_filer/image/change_form.html new file mode 100644 index 0000000..f96b96a --- /dev/null +++ b/image_filer/templates/admin/image_filer/image/change_form.html @@ -0,0 +1,25 @@ +{% extends "admin/change_form.html" %} + +{% block breadcrumbs %}{% endblock %} + +{% block coltype %}colMS{% endblock %} +{% block sidebar %} + +{% endblock %} \ No newline at end of file diff --git a/image_filer/templates/image_filer/base.html b/image_filer/templates/image_filer/base.html new file mode 100644 index 0000000..e1628d0 --- /dev/null +++ b/image_filer/templates/image_filer/base.html @@ -0,0 +1,50 @@ +{% extends "admin/base_site.html" %} +{% load i18n %} +{% load adminmedia %} + +{% block extrahead %}{{ block.super }} + + + + + + + + + + + + + + + + + +{% endblock %} + +{% block coltype %}flex{% endblock %} +{% block bodyclass %}change-list filebrowser yui-skin-sam{% endblock %} +{% block extrastyle %}{{ block.super }} + +{% comment %}{% endcomment %} +{% if query.pop %} + +{% endif %} +{% endblock %} diff --git a/image_filer/templates/image_filer/directory_listing.html b/image_filer/templates/image_filer/directory_listing.html new file mode 100644 index 0000000..fc8f962 --- /dev/null +++ b/image_filer/templates/image_filer/directory_listing.html @@ -0,0 +1,93 @@ +{% extends "image_filer/base.html" %} +{% load i18n %} +{% load adminmedia %} + +{% block extrahead %}{{ block.super }} +{% endblock %} + +{% block coltype %}colMS{% endblock %} + + +{% block extrastyle %}{{ block.super }} +{% endblock %} + +{% block breadcrumbs %} + +{% endblock %} + +{% block sidebar %} + +{% endblock %} + + +{% block content %} +
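+{% comment %}
+Expected context, taken from usage below (the view itself is not part of this
+excerpt): folder, folder_children, folder_files, permissions (per-folder
+permission flags) and permstest (a raw permission id list, apparently shown
+for debugging).
+{% endcomment %}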
+ has_edit_permission: {{ permissions.has_edit_permission }} | has_read_permission: {{ permissions.has_read_permission }} | has_add_children_permission: {{ permissions.has_add_children_permission }} +
perms: {{ permstest }} +
+ {% comment %} + {% endcomment %} +
+ + {% include "image_filer/include/tableheader.html" %} + + {% if folder.parent %}{% if folder.parent.id %} + + + + + + + + + {% endif %}{% endif %} + {% for file in folder_children %} + {% include "image_filer/include/folderlisting.html" %} + {% endfor %} + {% for file in folder_files %} + {% include "image_filer/include/folderlisting.html" %} + {% endfor %} + {% comment %} + + + + + + + + {% for file in folder.children.all %} + {% include "image_filer/include/folderlisting.html" %} + {% endfor %} + + + + + + + + {% for file in folder.files %} + {% include "image_filer/include/folderlisting.html" %} + {% endfor %} + {% endcomment %} + +
..
All Folders
All Files
+
+
+
+{% include "image_filer/include/new_folder_dialog.html" %} +{% include "image_filer/include/upload_dialog.html" %} + +{% endblock %} \ No newline at end of file diff --git a/image_filer/templates/image_filer/include/bucket.html b/image_filer/templates/image_filer/include/bucket.html new file mode 100644 index 0000000..a713076 --- /dev/null +++ b/image_filer/templates/image_filer/include/bucket.html @@ -0,0 +1,10 @@ +Buckets: {{ user }} +{% for bucket in user.buckets.all %} +

Bucket {{ bucket.id }}

+ +move files to current folder +{% endfor %} + \ No newline at end of file diff --git a/image_filer/templates/image_filer/include/export_dialog.html b/image_filer/templates/image_filer/include/export_dialog.html new file mode 100644 index 0000000..e903af8 --- /dev/null +++ b/image_filer/templates/image_filer/include/export_dialog.html @@ -0,0 +1,39 @@ + + + \ No newline at end of file diff --git a/image_filer/templates/image_filer/include/folderlisting.html b/image_filer/templates/image_filer/include/folderlisting.html new file mode 100644 index 0000000..78e812f --- /dev/null +++ b/image_filer/templates/image_filer/include/folderlisting.html @@ -0,0 +1,18 @@ +{% load adminmedia %} +{% load i18n %} + + + + + {% if file.get_admin_thumbnail_url %}{{ file.default_alt_text }}{% endif %} + + {% ifequal file.file_type 'Folder' %} + {{ file.name }} + {% else %} + {{ file.name }}
{{ file.file.width }} x {{ file.file.height }} px + {% endifequal %} + {{ file.owner }} + {{ file.perms }} + + + \ No newline at end of file diff --git a/image_filer/templates/image_filer/include/new_folder_dialog.html b/image_filer/templates/image_filer/include/new_folder_dialog.html new file mode 100644 index 0000000..d5e86b8 --- /dev/null +++ b/image_filer/templates/image_filer/include/new_folder_dialog.html @@ -0,0 +1,40 @@ + + + \ No newline at end of file diff --git a/image_filer/templates/image_filer/include/new_folder_form.html b/image_filer/templates/image_filer/include/new_folder_form.html new file mode 100644 index 0000000..8bba827 --- /dev/null +++ b/image_filer/templates/image_filer/include/new_folder_form.html @@ -0,0 +1,3 @@ +
+ {{ new_folder_form }} +
\ No newline at end of file diff --git a/image_filer/templates/image_filer/include/upload_dialog.html b/image_filer/templates/image_filer/include/upload_dialog.html new file mode 100644 index 0000000..82ce652 --- /dev/null +++ b/image_filer/templates/image_filer/include/upload_dialog.html @@ -0,0 +1,245 @@ + + + + + + + \ No newline at end of file diff --git a/image_filer/urls.py b/image_filer/urls.py new file mode 100644 index 0000000..1ec19fb --- /dev/null +++ b/image_filer/urls.py @@ -0,0 +1,15 @@ +from django.conf.urls.defaults import * + +urlpatterns = patterns('image_filer.views', + url(r'^directory/(?P\d+)/$', 'directory_listing', name='image_filer-directory_listing'), + url(r'^directory/$', 'directory_listing', name='image_filer-directory_listing-root'), + url(r'^directory/(?P\d+)/make_folder/$', 'make_folder', name='image_filer-directory_listing-make_folder'), + url(r'^directory/make_folder/$', 'make_folder', name='image_filer-directory_listing-make_root_folder'), + + url(r'^directory/(?P\d+)/upload/$', 'upload', name='image_filer-upload'), + url(r'^directory/upload/$', 'upload', name='image_filer-upload'), + + url(r'^bucket/(?P\d+)/move_to_folder/$', 'move_files_to_folder', name='image_filer-move_bucket_to_folder'), + url(r'^file/move_to_folder/$', 'move_files_to_folder', name='image_filer-move_files_to_folder'), + +) diff --git a/image_filer/utils/EXIF.py b/image_filer/utils/EXIF.py new file mode 100644 index 0000000..ed4192a --- /dev/null +++ b/image_filer/utils/EXIF.py @@ -0,0 +1,1767 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Library to extract EXIF information from digital camera image files +# http://sourceforge.net/projects/exif-py/ +# +# VERSION 1.1.0 +# +# To use this library call with: +# f = open(path_name, 'rb') +# tags = EXIF.process_file(f) +# +# To ignore MakerNote tags, pass the -q or --quick +# command line arguments, or as +# tags = EXIF.process_file(f, details=False) +# +# To stop processing after a certain tag is retrieved, +# pass the -t TAG or --stop-tag TAG argument, or as +# tags = EXIF.process_file(f, stop_tag='TAG') +# +# where TAG is a valid tag name, ex 'DateTimeOriginal' +# +# These 2 are useful when you are retrieving a large list of images +# +# +# To return an error on invalid tags, +# pass the -s or --strict argument, or as +# tags = EXIF.process_file(f, strict=True) +# +# Otherwise these tags will be ignored +# +# Returned tags will be a dictionary mapping names of EXIF tags to their +# values in the file named by path_name. You can process the tags +# as you wish. In particular, you can iterate through all the tags with: +# for tag in tags.keys(): +# if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', +# 'EXIF MakerNote'): +# print "Key: %s, value %s" % (tag, tags[tag]) +# (This code uses the if statement to avoid printing out a few of the +# tags that tend to be long or boring.) +# +# The tags dictionary will include keys for all of the usual EXIF +# tags, and will also include keys for Makernotes used by some +# cameras, for which we have a good specification. +# +# Note that the dictionary keys are the IFD name followed by the +# tag name. For example: +# 'EXIF DateTimeOriginal', 'Image Orientation', 'MakerNote FocusMode' +# +# Copyright (c) 2002-2007 Gene Cash All rights reserved +# Copyright (c) 2007-2008 Ianaré Sévi All rights reserved +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# 3. Neither the name of the authors nor the names of its contributors +# may be used to endorse or promote products derived from this +# software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# +# ----- See 'changes.txt' file for all contributors and changes ----- # +# + + +# Don't throw an exception when given an out of range character. +def make_string(seq): + str = '' + for c in seq: + # Screen out non-printing characters + if 32 <= c and c < 256: + str += chr(c) + # If no printing chars + if not str: + return seq + return str + +# Special version to deal with the code in the first 8 bytes of a user comment. +# First 8 bytes gives coding system e.g. ASCII vs. JIS vs Unicode +def make_string_uc(seq): + code = seq[0:8] + seq = seq[8:] + # Of course, this is only correct if ASCII, and the standard explicitly + # allows JIS and Unicode. 
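+    # (Per the EXIF 2.2 spec the 8-byte header is 'ASCII\x00\x00\x00',
+    # 'JIS\x00\x00\x00\x00\x00', 'UNICODE\x00', or eight NULs for "undefined";
+    # everything is simply treated as ASCII here.)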
+ return make_string(seq) + +# field type descriptions as (length, abbreviation, full name) tuples +FIELD_TYPES = ( + (0, 'X', 'Proprietary'), # no such type + (1, 'B', 'Byte'), + (1, 'A', 'ASCII'), + (2, 'S', 'Short'), + (4, 'L', 'Long'), + (8, 'R', 'Ratio'), + (1, 'SB', 'Signed Byte'), + (1, 'U', 'Undefined'), + (2, 'SS', 'Signed Short'), + (4, 'SL', 'Signed Long'), + (8, 'SR', 'Signed Ratio'), + ) + +# dictionary of main EXIF tag names +# first element of tuple is tag name, optional second element is +# another dictionary giving names to values +EXIF_TAGS = { + 0x0100: ('ImageWidth', ), + 0x0101: ('ImageLength', ), + 0x0102: ('BitsPerSample', ), + 0x0103: ('Compression', + {1: 'Uncompressed', + 2: 'CCITT 1D', + 3: 'T4/Group 3 Fax', + 4: 'T6/Group 4 Fax', + 5: 'LZW', + 6: 'JPEG (old-style)', + 7: 'JPEG', + 8: 'Adobe Deflate', + 9: 'JBIG B&W', + 10: 'JBIG Color', + 32766: 'Next', + 32769: 'Epson ERF Compressed', + 32771: 'CCIRLEW', + 32773: 'PackBits', + 32809: 'Thunderscan', + 32895: 'IT8CTPAD', + 32896: 'IT8LW', + 32897: 'IT8MP', + 32898: 'IT8BL', + 32908: 'PixarFilm', + 32909: 'PixarLog', + 32946: 'Deflate', + 32947: 'DCS', + 34661: 'JBIG', + 34676: 'SGILog', + 34677: 'SGILog24', + 34712: 'JPEG 2000', + 34713: 'Nikon NEF Compressed', + 65000: 'Kodak DCR Compressed', + 65535: 'Pentax PEF Compressed'}), + 0x0106: ('PhotometricInterpretation', ), + 0x0107: ('Thresholding', ), + 0x010A: ('FillOrder', ), + 0x010D: ('DocumentName', ), + 0x010E: ('ImageDescription', ), + 0x010F: ('Make', ), + 0x0110: ('Model', ), + 0x0111: ('StripOffsets', ), + 0x0112: ('Orientation', + {1: 'Horizontal (normal)', + 2: 'Mirrored horizontal', + 3: 'Rotated 180', + 4: 'Mirrored vertical', + 5: 'Mirrored horizontal then rotated 90 CCW', + 6: 'Rotated 90 CW', + 7: 'Mirrored horizontal then rotated 90 CW', + 8: 'Rotated 90 CCW'}), + 0x0115: ('SamplesPerPixel', ), + 0x0116: ('RowsPerStrip', ), + 0x0117: ('StripByteCounts', ), + 0x011A: ('XResolution', ), + 0x011B: ('YResolution', ), + 0x011C: ('PlanarConfiguration', ), + 0x011D: ('PageName', make_string), + 0x0128: ('ResolutionUnit', + {1: 'Not Absolute', + 2: 'Pixels/Inch', + 3: 'Pixels/Centimeter'}), + 0x012D: ('TransferFunction', ), + 0x0131: ('Software', ), + 0x0132: ('DateTime', ), + 0x013B: ('Artist', ), + 0x013E: ('WhitePoint', ), + 0x013F: ('PrimaryChromaticities', ), + 0x0156: ('TransferRange', ), + 0x0200: ('JPEGProc', ), + 0x0201: ('JPEGInterchangeFormat', ), + 0x0202: ('JPEGInterchangeFormatLength', ), + 0x0211: ('YCbCrCoefficients', ), + 0x0212: ('YCbCrSubSampling', ), + 0x0213: ('YCbCrPositioning', + {1: 'Centered', + 2: 'Co-sited'}), + 0x0214: ('ReferenceBlackWhite', ), + + 0x4746: ('Rating', ), + + 0x828D: ('CFARepeatPatternDim', ), + 0x828E: ('CFAPattern', ), + 0x828F: ('BatteryLevel', ), + 0x8298: ('Copyright', ), + 0x829A: ('ExposureTime', ), + 0x829D: ('FNumber', ), + 0x83BB: ('IPTC/NAA', ), + 0x8769: ('ExifOffset', ), + 0x8773: ('InterColorProfile', ), + 0x8822: ('ExposureProgram', + {0: 'Unidentified', + 1: 'Manual', + 2: 'Program Normal', + 3: 'Aperture Priority', + 4: 'Shutter Priority', + 5: 'Program Creative', + 6: 'Program Action', + 7: 'Portrait Mode', + 8: 'Landscape Mode'}), + 0x8824: ('SpectralSensitivity', ), + 0x8825: ('GPSInfo', ), + 0x8827: ('ISOSpeedRatings', ), + 0x8828: ('OECF', ), + 0x9000: ('ExifVersion', make_string), + 0x9003: ('DateTimeOriginal', ), + 0x9004: ('DateTimeDigitized', ), + 0x9101: ('ComponentsConfiguration', + {0: '', + 1: 'Y', + 2: 'Cb', + 3: 'Cr', + 4: 'Red', + 5: 'Green', + 6: 'Blue'}), + 0x9102: 
('CompressedBitsPerPixel', ), + 0x9201: ('ShutterSpeedValue', ), + 0x9202: ('ApertureValue', ), + 0x9203: ('BrightnessValue', ), + 0x9204: ('ExposureBiasValue', ), + 0x9205: ('MaxApertureValue', ), + 0x9206: ('SubjectDistance', ), + 0x9207: ('MeteringMode', + {0: 'Unidentified', + 1: 'Average', + 2: 'CenterWeightedAverage', + 3: 'Spot', + 4: 'MultiSpot', + 5: 'Pattern'}), + 0x9208: ('LightSource', + {0: 'Unknown', + 1: 'Daylight', + 2: 'Fluorescent', + 3: 'Tungsten', + 9: 'Fine Weather', + 10: 'Flash', + 11: 'Shade', + 12: 'Daylight Fluorescent', + 13: 'Day White Fluorescent', + 14: 'Cool White Fluorescent', + 15: 'White Fluorescent', + 17: 'Standard Light A', + 18: 'Standard Light B', + 19: 'Standard Light C', + 20: 'D55', + 21: 'D65', + 22: 'D75', + 255: 'Other'}), + 0x9209: ('Flash', + {0: 'No', + 1: 'Fired', + 5: 'Fired (?)', # no return sensed + 7: 'Fired (!)', # return sensed + 9: 'Fill Fired', + 13: 'Fill Fired (?)', + 15: 'Fill Fired (!)', + 16: 'Off', + 24: 'Auto Off', + 25: 'Auto Fired', + 29: 'Auto Fired (?)', + 31: 'Auto Fired (!)', + 32: 'Not Available'}), + 0x920A: ('FocalLength', ), + 0x9214: ('SubjectArea', ), + 0x927C: ('MakerNote', ), + 0x9286: ('UserComment', make_string_uc), + 0x9290: ('SubSecTime', ), + 0x9291: ('SubSecTimeOriginal', ), + 0x9292: ('SubSecTimeDigitized', ), + + # used by Windows Explorer + 0x9C9B: ('XPTitle', ), + 0x9C9C: ('XPComment', ), + 0x9C9D: ('XPAuthor', ), #(ignored by Windows Explorer if Artist exists) + 0x9C9E: ('XPKeywords', ), + 0x9C9F: ('XPSubject', ), + + 0xA000: ('FlashPixVersion', make_string), + 0xA001: ('ColorSpace', + {1: 'sRGB', + 2: 'Adobe RGB', + 65535: 'Uncalibrated'}), + 0xA002: ('ExifImageWidth', ), + 0xA003: ('ExifImageLength', ), + 0xA005: ('InteroperabilityOffset', ), + 0xA20B: ('FlashEnergy', ), # 0x920B in TIFF/EP + 0xA20C: ('SpatialFrequencyResponse', ), # 0x920C + 0xA20E: ('FocalPlaneXResolution', ), # 0x920E + 0xA20F: ('FocalPlaneYResolution', ), # 0x920F + 0xA210: ('FocalPlaneResolutionUnit', ), # 0x9210 + 0xA214: ('SubjectLocation', ), # 0x9214 + 0xA215: ('ExposureIndex', ), # 0x9215 + 0xA217: ('SensingMethod', # 0x9217 + {1: 'Not defined', + 2: 'One-chip color area', + 3: 'Two-chip color area', + 4: 'Three-chip color area', + 5: 'Color sequential area', + 7: 'Trilinear', + 8: 'Color sequential linear'}), + 0xA300: ('FileSource', + {1: 'Film Scanner', + 2: 'Reflection Print Scanner', + 3: 'Digital Camera'}), + 0xA301: ('SceneType', + {1: 'Directly Photographed'}), + 0xA302: ('CVAPattern', ), + 0xA401: ('CustomRendered', + {0: 'Normal', + 1: 'Custom'}), + 0xA402: ('ExposureMode', + {0: 'Auto Exposure', + 1: 'Manual Exposure', + 2: 'Auto Bracket'}), + 0xA403: ('WhiteBalance', + {0: 'Auto', + 1: 'Manual'}), + 0xA404: ('DigitalZoomRatio', ), + 0xA405: ('FocalLengthIn35mmFilm', ), + 0xA406: ('SceneCaptureType', + {0: 'Standard', + 1: 'Landscape', + 2: 'Portrait', + 3: 'Night)'}), + 0xA407: ('GainControl', + {0: 'None', + 1: 'Low gain up', + 2: 'High gain up', + 3: 'Low gain down', + 4: 'High gain down'}), + 0xA408: ('Contrast', + {0: 'Normal', + 1: 'Soft', + 2: 'Hard'}), + 0xA409: ('Saturation', + {0: 'Normal', + 1: 'Soft', + 2: 'Hard'}), + 0xA40A: ('Sharpness', + {0: 'Normal', + 1: 'Soft', + 2: 'Hard'}), + 0xA40B: ('DeviceSettingDescription', ), + 0xA40C: ('SubjectDistanceRange', ), + 0xA500: ('Gamma', ), + 0xC4A5: ('PrintIM', ), + 0xEA1C: ('Padding', ), + } + +# interoperability tags +INTR_TAGS = { + 0x0001: ('InteroperabilityIndex', ), + 0x0002: ('InteroperabilityVersion', ), + 0x1000: ('RelatedImageFileFormat', ), + 
0x1001: ('RelatedImageWidth', ), + 0x1002: ('RelatedImageLength', ), + } + +# GPS tags (not used yet, haven't seen camera with GPS) +GPS_TAGS = { + 0x0000: ('GPSVersionID', ), + 0x0001: ('GPSLatitudeRef', ), + 0x0002: ('GPSLatitude', ), + 0x0003: ('GPSLongitudeRef', ), + 0x0004: ('GPSLongitude', ), + 0x0005: ('GPSAltitudeRef', ), + 0x0006: ('GPSAltitude', ), + 0x0007: ('GPSTimeStamp', ), + 0x0008: ('GPSSatellites', ), + 0x0009: ('GPSStatus', ), + 0x000A: ('GPSMeasureMode', ), + 0x000B: ('GPSDOP', ), + 0x000C: ('GPSSpeedRef', ), + 0x000D: ('GPSSpeed', ), + 0x000E: ('GPSTrackRef', ), + 0x000F: ('GPSTrack', ), + 0x0010: ('GPSImgDirectionRef', ), + 0x0011: ('GPSImgDirection', ), + 0x0012: ('GPSMapDatum', ), + 0x0013: ('GPSDestLatitudeRef', ), + 0x0014: ('GPSDestLatitude', ), + 0x0015: ('GPSDestLongitudeRef', ), + 0x0016: ('GPSDestLongitude', ), + 0x0017: ('GPSDestBearingRef', ), + 0x0018: ('GPSDestBearing', ), + 0x0019: ('GPSDestDistanceRef', ), + 0x001A: ('GPSDestDistance', ), + 0x001D: ('GPSDate', ), + } + +# Ignore these tags when quick processing +# 0x927C is MakerNote Tags +# 0x9286 is user comment +IGNORE_TAGS=(0x9286, 0x927C) + +# http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp +def nikon_ev_bias(seq): + # First digit seems to be in steps of 1/6 EV. + # Does the third value mean the step size? It is usually 6, + # but it is 12 for the ExposureDifference. + # + # Check for an error condition that could cause a crash. + # This only happens if something has gone really wrong in + # reading the Nikon MakerNote. + if len( seq ) < 4 : return "" + # + if seq == [252, 1, 6, 0]: + return "-2/3 EV" + if seq == [253, 1, 6, 0]: + return "-1/2 EV" + if seq == [254, 1, 6, 0]: + return "-1/3 EV" + if seq == [0, 1, 6, 0]: + return "0 EV" + if seq == [2, 1, 6, 0]: + return "+1/3 EV" + if seq == [3, 1, 6, 0]: + return "+1/2 EV" + if seq == [4, 1, 6, 0]: + return "+2/3 EV" + # Handle combinations not in the table. + a = seq[0] + # Causes headaches for the +/- logic, so special case it. + if a == 0: + return "0 EV" + if a > 127: + a = 256 - a + ret_str = "-" + else: + ret_str = "+" + b = seq[2] # Assume third value means the step size + whole = a / b + a = a % b + if whole != 0: + ret_str = ret_str + str(whole) + " " + if a == 0: + ret_str = ret_str + "EV" + else: + r = Ratio(a, b) + ret_str = ret_str + r.__repr__() + " EV" + return ret_str + +# Nikon E99x MakerNote Tags +MAKERNOTE_NIKON_NEWER_TAGS={ + 0x0001: ('MakernoteVersion', make_string), # Sometimes binary + 0x0002: ('ISOSetting', make_string), + 0x0003: ('ColorMode', ), + 0x0004: ('Quality', ), + 0x0005: ('Whitebalance', ), + 0x0006: ('ImageSharpening', ), + 0x0007: ('FocusMode', ), + 0x0008: ('FlashSetting', ), + 0x0009: ('AutoFlashMode', ), + 0x000B: ('WhiteBalanceBias', ), + 0x000C: ('WhiteBalanceRBCoeff', ), + 0x000D: ('ProgramShift', nikon_ev_bias), + # Nearly the same as the other EV vals, but step size is 1/12 EV (?) + 0x000E: ('ExposureDifference', nikon_ev_bias), + 0x000F: ('ISOSelection', ), + 0x0011: ('NikonPreview', ), + 0x0012: ('FlashCompensation', nikon_ev_bias), + 0x0013: ('ISOSpeedRequested', ), + 0x0016: ('PhotoCornerCoordinates', ), + # 0x0017: Unknown, but most likely an EV value + 0x0018: ('FlashBracketCompensationApplied', nikon_ev_bias), + 0x0019: ('AEBracketCompensationApplied', ), + 0x001A: ('ImageProcessing', ), + 0x001B: ('CropHiSpeed', ), + 0x001D: ('SerialNumber', ), # Conflict with 0x00A0 ? 
+ 0x001E: ('ColorSpace', ), + 0x001F: ('VRInfo', ), + 0x0020: ('ImageAuthentication', ), + 0x0022: ('ActiveDLighting', ), + 0x0023: ('PictureControl', ), + 0x0024: ('WorldTime', ), + 0x0025: ('ISOInfo', ), + 0x0080: ('ImageAdjustment', ), + 0x0081: ('ToneCompensation', ), + 0x0082: ('AuxiliaryLens', ), + 0x0083: ('LensType', ), + 0x0084: ('LensMinMaxFocalMaxAperture', ), + 0x0085: ('ManualFocusDistance', ), + 0x0086: ('DigitalZoomFactor', ), + 0x0087: ('FlashMode', + {0x00: 'Did Not Fire', + 0x01: 'Fired, Manual', + 0x07: 'Fired, External', + 0x08: 'Fired, Commander Mode ', + 0x09: 'Fired, TTL Mode'}), + 0x0088: ('AFFocusPosition', + {0x0000: 'Center', + 0x0100: 'Top', + 0x0200: 'Bottom', + 0x0300: 'Left', + 0x0400: 'Right'}), + 0x0089: ('BracketingMode', + {0x00: 'Single frame, no bracketing', + 0x01: 'Continuous, no bracketing', + 0x02: 'Timer, no bracketing', + 0x10: 'Single frame, exposure bracketing', + 0x11: 'Continuous, exposure bracketing', + 0x12: 'Timer, exposure bracketing', + 0x40: 'Single frame, white balance bracketing', + 0x41: 'Continuous, white balance bracketing', + 0x42: 'Timer, white balance bracketing'}), + 0x008A: ('AutoBracketRelease', ), + 0x008B: ('LensFStops', ), + 0x008C: ('NEFCurve1', ), # ExifTool calls this 'ContrastCurve' + 0x008D: ('ColorMode', ), + 0x008F: ('SceneMode', ), + 0x0090: ('LightingType', ), + 0x0091: ('ShotInfo', ), # First 4 bytes are a version number in ASCII + 0x0092: ('HueAdjustment', ), + # ExifTool calls this 'NEFCompression', should be 1-4 + 0x0093: ('Compression', ), + 0x0094: ('Saturation', + {-3: 'B&W', + -2: '-2', + -1: '-1', + 0: '0', + 1: '1', + 2: '2'}), + 0x0095: ('NoiseReduction', ), + 0x0096: ('NEFCurve2', ), # ExifTool calls this 'LinearizationTable' + 0x0097: ('ColorBalance', ), # First 4 bytes are a version number in ASCII + 0x0098: ('LensData', ), # First 4 bytes are a version number in ASCII + 0x0099: ('RawImageCenter', ), + 0x009A: ('SensorPixelSize', ), + 0x009C: ('Scene Assist', ), + 0x009E: ('RetouchHistory', ), + 0x00A0: ('SerialNumber', ), + 0x00A2: ('ImageDataSize', ), + # 00A3: unknown - a single byte 0 + # 00A4: In NEF, looks like a 4 byte ASCII version number ('0200') + 0x00A5: ('ImageCount', ), + 0x00A6: ('DeletedImageCount', ), + 0x00A7: ('TotalShutterReleases', ), + # First 4 bytes are a version number in ASCII, with version specific + # info to follow. Its hard to treat it as a string due to embedded nulls. 
+ 0x00A8: ('FlashInfo', ), + 0x00A9: ('ImageOptimization', ), + 0x00AA: ('Saturation', ), + 0x00AB: ('DigitalVariProgram', ), + 0x00AC: ('ImageStabilization', ), + 0x00AD: ('Responsive AF', ), # 'AFResponse' + 0x00B0: ('MultiExposure', ), + 0x00B1: ('HighISONoiseReduction', ), + 0x00B7: ('AFInfo', ), + 0x00B8: ('FileInfo', ), + # 00B9: unknown + 0x0100: ('DigitalICE', ), + 0x0103: ('PreviewCompression', + {1: 'Uncompressed', + 2: 'CCITT 1D', + 3: 'T4/Group 3 Fax', + 4: 'T6/Group 4 Fax', + 5: 'LZW', + 6: 'JPEG (old-style)', + 7: 'JPEG', + 8: 'Adobe Deflate', + 9: 'JBIG B&W', + 10: 'JBIG Color', + 32766: 'Next', + 32769: 'Epson ERF Compressed', + 32771: 'CCIRLEW', + 32773: 'PackBits', + 32809: 'Thunderscan', + 32895: 'IT8CTPAD', + 32896: 'IT8LW', + 32897: 'IT8MP', + 32898: 'IT8BL', + 32908: 'PixarFilm', + 32909: 'PixarLog', + 32946: 'Deflate', + 32947: 'DCS', + 34661: 'JBIG', + 34676: 'SGILog', + 34677: 'SGILog24', + 34712: 'JPEG 2000', + 34713: 'Nikon NEF Compressed', + 65000: 'Kodak DCR Compressed', + 65535: 'Pentax PEF Compressed',}), + 0x0201: ('PreviewImageStart', ), + 0x0202: ('PreviewImageLength', ), + 0x0213: ('PreviewYCbCrPositioning', + {1: 'Centered', + 2: 'Co-sited'}), + 0x0010: ('DataDump', ), + } + +MAKERNOTE_NIKON_OLDER_TAGS = { + 0x0003: ('Quality', + {1: 'VGA Basic', + 2: 'VGA Normal', + 3: 'VGA Fine', + 4: 'SXGA Basic', + 5: 'SXGA Normal', + 6: 'SXGA Fine'}), + 0x0004: ('ColorMode', + {1: 'Color', + 2: 'Monochrome'}), + 0x0005: ('ImageAdjustment', + {0: 'Normal', + 1: 'Bright+', + 2: 'Bright-', + 3: 'Contrast+', + 4: 'Contrast-'}), + 0x0006: ('CCDSpeed', + {0: 'ISO 80', + 2: 'ISO 160', + 4: 'ISO 320', + 5: 'ISO 100'}), + 0x0007: ('WhiteBalance', + {0: 'Auto', + 1: 'Preset', + 2: 'Daylight', + 3: 'Incandescent', + 4: 'Fluorescent', + 5: 'Cloudy', + 6: 'Speed Light'}), + } + +# decode Olympus SpecialMode tag in MakerNote +def olympus_special_mode(v): + a={ + 0: 'Normal', + 1: 'Unknown', + 2: 'Fast', + 3: 'Panorama'} + b={ + 0: 'Non-panoramic', + 1: 'Left to right', + 2: 'Right to left', + 3: 'Bottom to top', + 4: 'Top to bottom'} + if v[0] not in a or v[2] not in b: + return v + return '%s - sequence %d - %s' % (a[v[0]], v[1], b[v[2]]) + +MAKERNOTE_OLYMPUS_TAGS={ + # ah HAH! those sneeeeeaky bastids! 
this is how they get past the fact + # that a JPEG thumbnail is not allowed in an uncompressed TIFF file + 0x0100: ('JPEGThumbnail', ), + 0x0200: ('SpecialMode', olympus_special_mode), + 0x0201: ('JPEGQual', + {1: 'SQ', + 2: 'HQ', + 3: 'SHQ'}), + 0x0202: ('Macro', + {0: 'Normal', + 1: 'Macro', + 2: 'SuperMacro'}), + 0x0203: ('BWMode', + {0: 'Off', + 1: 'On'}), + 0x0204: ('DigitalZoom', ), + 0x0205: ('FocalPlaneDiagonal', ), + 0x0206: ('LensDistortionParams', ), + 0x0207: ('SoftwareRelease', ), + 0x0208: ('PictureInfo', ), + 0x0209: ('CameraID', make_string), # print as string + 0x0F00: ('DataDump', ), + 0x0300: ('PreCaptureFrames', ), + 0x0404: ('SerialNumber', ), + 0x1000: ('ShutterSpeedValue', ), + 0x1001: ('ISOValue', ), + 0x1002: ('ApertureValue', ), + 0x1003: ('BrightnessValue', ), + 0x1004: ('FlashMode', ), + 0x1004: ('FlashMode', + {2: 'On', + 3: 'Off'}), + 0x1005: ('FlashDevice', + {0: 'None', + 1: 'Internal', + 4: 'External', + 5: 'Internal + External'}), + 0x1006: ('ExposureCompensation', ), + 0x1007: ('SensorTemperature', ), + 0x1008: ('LensTemperature', ), + 0x100b: ('FocusMode', + {0: 'Auto', + 1: 'Manual'}), + 0x1017: ('RedBalance', ), + 0x1018: ('BlueBalance', ), + 0x101a: ('SerialNumber', ), + 0x1023: ('FlashExposureComp', ), + 0x1026: ('ExternalFlashBounce', + {0: 'No', + 1: 'Yes'}), + 0x1027: ('ExternalFlashZoom', ), + 0x1028: ('ExternalFlashMode', ), + 0x1029: ('Contrast int16u', + {0: 'High', + 1: 'Normal', + 2: 'Low'}), + 0x102a: ('SharpnessFactor', ), + 0x102b: ('ColorControl', ), + 0x102c: ('ValidBits', ), + 0x102d: ('CoringFilter', ), + 0x102e: ('OlympusImageWidth', ), + 0x102f: ('OlympusImageHeight', ), + 0x1034: ('CompressionRatio', ), + 0x1035: ('PreviewImageValid', + {0: 'No', + 1: 'Yes'}), + 0x1036: ('PreviewImageStart', ), + 0x1037: ('PreviewImageLength', ), + 0x1039: ('CCDScanMode', + {0: 'Interlaced', + 1: 'Progressive'}), + 0x103a: ('NoiseReduction', + {0: 'Off', + 1: 'On'}), + 0x103b: ('InfinityLensStep', ), + 0x103c: ('NearLensStep', ), + + # TODO - these need extra definitions + # http://search.cpan.org/src/EXIFTOOL/Image-ExifTool-6.90/html/TagNames/Olympus.html + 0x2010: ('Equipment', ), + 0x2020: ('CameraSettings', ), + 0x2030: ('RawDevelopment', ), + 0x2040: ('ImageProcessing', ), + 0x2050: ('FocusInfo', ), + 0x3000: ('RawInfo ', ), + } + +# 0x2020 CameraSettings +MAKERNOTE_OLYMPUS_TAG_0x2020={ + 0x0100: ('PreviewImageValid', + {0: 'No', + 1: 'Yes'}), + 0x0101: ('PreviewImageStart', ), + 0x0102: ('PreviewImageLength', ), + 0x0200: ('ExposureMode', + {1: 'Manual', + 2: 'Program', + 3: 'Aperture-priority AE', + 4: 'Shutter speed priority AE', + 5: 'Program-shift'}), + 0x0201: ('AELock', + {0: 'Off', + 1: 'On'}), + 0x0202: ('MeteringMode', + {2: 'Center Weighted', + 3: 'Spot', + 5: 'ESP', + 261: 'Pattern+AF', + 515: 'Spot+Highlight control', + 1027: 'Spot+Shadow control'}), + 0x0300: ('MacroMode', + {0: 'Off', + 1: 'On'}), + 0x0301: ('FocusMode', + {0: 'Single AF', + 1: 'Sequential shooting AF', + 2: 'Continuous AF', + 3: 'Multi AF', + 10: 'MF'}), + 0x0302: ('FocusProcess', + {0: 'AF Not Used', + 1: 'AF Used'}), + 0x0303: ('AFSearch', + {0: 'Not Ready', + 1: 'Ready'}), + 0x0304: ('AFAreas', ), + 0x0401: ('FlashExposureCompensation', ), + 0x0500: ('WhiteBalance2', + {0: 'Auto', + 16: '7500K (Fine Weather with Shade)', + 17: '6000K (Cloudy)', + 18: '5300K (Fine Weather)', + 20: '3000K (Tungsten light)', + 21: '3600K (Tungsten light-like)', + 33: '6600K (Daylight fluorescent)', + 34: '4500K (Neutral white fluorescent)', + 35: '4000K (Cool white 
fluorescent)', + 48: '3600K (Tungsten light-like)', + 256: 'Custom WB 1', + 257: 'Custom WB 2', + 258: 'Custom WB 3', + 259: 'Custom WB 4', + 512: 'Custom WB 5400K', + 513: 'Custom WB 2900K', + 514: 'Custom WB 8000K', }), + 0x0501: ('WhiteBalanceTemperature', ), + 0x0502: ('WhiteBalanceBracket', ), + 0x0503: ('CustomSaturation', ), # (3 numbers: 1. CS Value, 2. Min, 3. Max) + 0x0504: ('ModifiedSaturation', + {0: 'Off', + 1: 'CM1 (Red Enhance)', + 2: 'CM2 (Green Enhance)', + 3: 'CM3 (Blue Enhance)', + 4: 'CM4 (Skin Tones)'}), + 0x0505: ('ContrastSetting', ), # (3 numbers: 1. Contrast, 2. Min, 3. Max) + 0x0506: ('SharpnessSetting', ), # (3 numbers: 1. Sharpness, 2. Min, 3. Max) + 0x0507: ('ColorSpace', + {0: 'sRGB', + 1: 'Adobe RGB', + 2: 'Pro Photo RGB'}), + 0x0509: ('SceneMode', + {0: 'Standard', + 6: 'Auto', + 7: 'Sport', + 8: 'Portrait', + 9: 'Landscape+Portrait', + 10: 'Landscape', + 11: 'Night scene', + 13: 'Panorama', + 16: 'Landscape+Portrait', + 17: 'Night+Portrait', + 19: 'Fireworks', + 20: 'Sunset', + 22: 'Macro', + 25: 'Documents', + 26: 'Museum', + 28: 'Beach&Snow', + 30: 'Candle', + 35: 'Underwater Wide1', + 36: 'Underwater Macro', + 39: 'High Key', + 40: 'Digital Image Stabilization', + 44: 'Underwater Wide2', + 45: 'Low Key', + 46: 'Children', + 48: 'Nature Macro'}), + 0x050a: ('NoiseReduction', + {0: 'Off', + 1: 'Noise Reduction', + 2: 'Noise Filter', + 3: 'Noise Reduction + Noise Filter', + 4: 'Noise Filter (ISO Boost)', + 5: 'Noise Reduction + Noise Filter (ISO Boost)'}), + 0x050b: ('DistortionCorrection', + {0: 'Off', + 1: 'On'}), + 0x050c: ('ShadingCompensation', + {0: 'Off', + 1: 'On'}), + 0x050d: ('CompressionFactor', ), + 0x050f: ('Gradation', + {'-1 -1 1': 'Low Key', + '0 -1 1': 'Normal', + '1 -1 1': 'High Key'}), + 0x0520: ('PictureMode', + {1: 'Vivid', + 2: 'Natural', + 3: 'Muted', + 256: 'Monotone', + 512: 'Sepia'}), + 0x0521: ('PictureModeSaturation', ), + 0x0522: ('PictureModeHue?', ), + 0x0523: ('PictureModeContrast', ), + 0x0524: ('PictureModeSharpness', ), + 0x0525: ('PictureModeBWFilter', + {0: 'n/a', + 1: 'Neutral', + 2: 'Yellow', + 3: 'Orange', + 4: 'Red', + 5: 'Green'}), + 0x0526: ('PictureModeTone', + {0: 'n/a', + 1: 'Neutral', + 2: 'Sepia', + 3: 'Blue', + 4: 'Purple', + 5: 'Green'}), + 0x0600: ('Sequence', ), # 2 or 3 numbers: 1. Mode, 2. Shot number, 3. Mode bits + 0x0601: ('PanoramaMode', ), # (2 numbers: 1. Mode, 2. 
Shot number) + 0x0603: ('ImageQuality2', + {1: 'SQ', + 2: 'HQ', + 3: 'SHQ', + 4: 'RAW'}), + 0x0901: ('ManometerReading', ), + } + + +MAKERNOTE_CASIO_TAGS={ + 0x0001: ('RecordingMode', + {1: 'Single Shutter', + 2: 'Panorama', + 3: 'Night Scene', + 4: 'Portrait', + 5: 'Landscape'}), + 0x0002: ('Quality', + {1: 'Economy', + 2: 'Normal', + 3: 'Fine'}), + 0x0003: ('FocusingMode', + {2: 'Macro', + 3: 'Auto Focus', + 4: 'Manual Focus', + 5: 'Infinity'}), + 0x0004: ('FlashMode', + {1: 'Auto', + 2: 'On', + 3: 'Off', + 4: 'Red Eye Reduction'}), + 0x0005: ('FlashIntensity', + {11: 'Weak', + 13: 'Normal', + 15: 'Strong'}), + 0x0006: ('Object Distance', ), + 0x0007: ('WhiteBalance', + {1: 'Auto', + 2: 'Tungsten', + 3: 'Daylight', + 4: 'Fluorescent', + 5: 'Shade', + 129: 'Manual'}), + 0x000B: ('Sharpness', + {0: 'Normal', + 1: 'Soft', + 2: 'Hard'}), + 0x000C: ('Contrast', + {0: 'Normal', + 1: 'Low', + 2: 'High'}), + 0x000D: ('Saturation', + {0: 'Normal', + 1: 'Low', + 2: 'High'}), + 0x0014: ('CCDSpeed', + {64: 'Normal', + 80: 'Normal', + 100: 'High', + 125: '+1.0', + 244: '+3.0', + 250: '+2.0'}), + } + +MAKERNOTE_FUJIFILM_TAGS={ + 0x0000: ('NoteVersion', make_string), + 0x1000: ('Quality', ), + 0x1001: ('Sharpness', + {1: 'Soft', + 2: 'Soft', + 3: 'Normal', + 4: 'Hard', + 5: 'Hard'}), + 0x1002: ('WhiteBalance', + {0: 'Auto', + 256: 'Daylight', + 512: 'Cloudy', + 768: 'DaylightColor-Fluorescent', + 769: 'DaywhiteColor-Fluorescent', + 770: 'White-Fluorescent', + 1024: 'Incandescent', + 3840: 'Custom'}), + 0x1003: ('Color', + {0: 'Normal', + 256: 'High', + 512: 'Low'}), + 0x1004: ('Tone', + {0: 'Normal', + 256: 'High', + 512: 'Low'}), + 0x1010: ('FlashMode', + {0: 'Auto', + 1: 'On', + 2: 'Off', + 3: 'Red Eye Reduction'}), + 0x1011: ('FlashStrength', ), + 0x1020: ('Macro', + {0: 'Off', + 1: 'On'}), + 0x1021: ('FocusMode', + {0: 'Auto', + 1: 'Manual'}), + 0x1030: ('SlowSync', + {0: 'Off', + 1: 'On'}), + 0x1031: ('PictureMode', + {0: 'Auto', + 1: 'Portrait', + 2: 'Landscape', + 4: 'Sports', + 5: 'Night', + 6: 'Program AE', + 256: 'Aperture Priority AE', + 512: 'Shutter Priority AE', + 768: 'Manual Exposure'}), + 0x1100: ('MotorOrBracket', + {0: 'Off', + 1: 'On'}), + 0x1300: ('BlurWarning', + {0: 'Off', + 1: 'On'}), + 0x1301: ('FocusWarning', + {0: 'Off', + 1: 'On'}), + 0x1302: ('AEWarning', + {0: 'Off', + 1: 'On'}), + } + +MAKERNOTE_CANON_TAGS = { + 0x0006: ('ImageType', ), + 0x0007: ('FirmwareVersion', ), + 0x0008: ('ImageNumber', ), + 0x0009: ('OwnerName', ), + } + +# this is in element offset, name, optional value dictionary format +MAKERNOTE_CANON_TAG_0x001 = { + 1: ('Macromode', + {1: 'Macro', + 2: 'Normal'}), + 2: ('SelfTimer', ), + 3: ('Quality', + {2: 'Normal', + 3: 'Fine', + 5: 'Superfine'}), + 4: ('FlashMode', + {0: 'Flash Not Fired', + 1: 'Auto', + 2: 'On', + 3: 'Red-Eye Reduction', + 4: 'Slow Synchro', + 5: 'Auto + Red-Eye Reduction', + 6: 'On + Red-Eye Reduction', + 16: 'external flash'}), + 5: ('ContinuousDriveMode', + {0: 'Single Or Timer', + 1: 'Continuous'}), + 7: ('FocusMode', + {0: 'One-Shot', + 1: 'AI Servo', + 2: 'AI Focus', + 3: 'MF', + 4: 'Single', + 5: 'Continuous', + 6: 'MF'}), + 10: ('ImageSize', + {0: 'Large', + 1: 'Medium', + 2: 'Small'}), + 11: ('EasyShootingMode', + {0: 'Full Auto', + 1: 'Manual', + 2: 'Landscape', + 3: 'Fast Shutter', + 4: 'Slow Shutter', + 5: 'Night', + 6: 'B&W', + 7: 'Sepia', + 8: 'Portrait', + 9: 'Sports', + 10: 'Macro/Close-Up', + 11: 'Pan Focus'}), + 12: ('DigitalZoom', + {0: 'None', + 1: '2x', + 2: '4x'}), + 13: ('Contrast', + {0xFFFF: 'Low', + 0: 
'Normal', + 1: 'High'}), + 14: ('Saturation', + {0xFFFF: 'Low', + 0: 'Normal', + 1: 'High'}), + 15: ('Sharpness', + {0xFFFF: 'Low', + 0: 'Normal', + 1: 'High'}), + 16: ('ISO', + {0: 'See ISOSpeedRatings Tag', + 15: 'Auto', + 16: '50', + 17: '100', + 18: '200', + 19: '400'}), + 17: ('MeteringMode', + {3: 'Evaluative', + 4: 'Partial', + 5: 'Center-weighted'}), + 18: ('FocusType', + {0: 'Manual', + 1: 'Auto', + 3: 'Close-Up (Macro)', + 8: 'Locked (Pan Mode)'}), + 19: ('AFPointSelected', + {0x3000: 'None (MF)', + 0x3001: 'Auto-Selected', + 0x3002: 'Right', + 0x3003: 'Center', + 0x3004: 'Left'}), + 20: ('ExposureMode', + {0: 'Easy Shooting', + 1: 'Program', + 2: 'Tv-priority', + 3: 'Av-priority', + 4: 'Manual', + 5: 'A-DEP'}), + 23: ('LongFocalLengthOfLensInFocalUnits', ), + 24: ('ShortFocalLengthOfLensInFocalUnits', ), + 25: ('FocalUnitsPerMM', ), + 28: ('FlashActivity', + {0: 'Did Not Fire', + 1: 'Fired'}), + 29: ('FlashDetails', + {14: 'External E-TTL', + 13: 'Internal Flash', + 11: 'FP Sync Used', + 7: '2nd("Rear")-Curtain Sync Used', + 4: 'FP Sync Enabled'}), + 32: ('FocusMode', + {0: 'Single', + 1: 'Continuous'}), + } + +MAKERNOTE_CANON_TAG_0x004 = { + 7: ('WhiteBalance', + {0: 'Auto', + 1: 'Sunny', + 2: 'Cloudy', + 3: 'Tungsten', + 4: 'Fluorescent', + 5: 'Flash', + 6: 'Custom'}), + 9: ('SequenceNumber', ), + 14: ('AFPointUsed', ), + 15: ('FlashBias', + {0xFFC0: '-2 EV', + 0xFFCC: '-1.67 EV', + 0xFFD0: '-1.50 EV', + 0xFFD4: '-1.33 EV', + 0xFFE0: '-1 EV', + 0xFFEC: '-0.67 EV', + 0xFFF0: '-0.50 EV', + 0xFFF4: '-0.33 EV', + 0x0000: '0 EV', + 0x000C: '0.33 EV', + 0x0010: '0.50 EV', + 0x0014: '0.67 EV', + 0x0020: '1 EV', + 0x002C: '1.33 EV', + 0x0030: '1.50 EV', + 0x0034: '1.67 EV', + 0x0040: '2 EV'}), + 19: ('SubjectDistance', ), + } + +# extract multibyte integer in Motorola format (little endian) +def s2n_motorola(str): + x = 0 + for c in str: + x = (x << 8) | ord(c) + return x + +# extract multibyte integer in Intel format (big endian) +def s2n_intel(str): + x = 0 + y = 0L + for c in str: + x = x | (ord(c) << y) + y = y + 8 + return x + +# ratio object that eventually will be able to reduce itself to lowest +# common denominator for printing +def gcd(a, b): + if b == 0: + return a + else: + return gcd(b, a % b) + +class Ratio: + def __init__(self, num, den): + self.num = num + self.den = den + + def __repr__(self): + self.reduce() + if self.den == 1: + return str(self.num) + return '%d/%d' % (self.num, self.den) + + def reduce(self): + div = gcd(self.num, self.den) + if div > 1: + self.num = self.num / div + self.den = self.den / div + +# for ease of dealing with tags +class IFD_Tag: + def __init__(self, printable, tag, field_type, values, field_offset, + field_length): + # printable version of data + self.printable = printable + # tag ID number + self.tag = tag + # field type as index into FIELD_TYPES + self.field_type = field_type + # offset of start of field in bytes from beginning of IFD + self.field_offset = field_offset + # length of data field in bytes + self.field_length = field_length + # either a string or array of data items + self.values = values + + def __str__(self): + return self.printable + + def __repr__(self): + return '(0x%04X) %s=%s @ %d' % (self.tag, + FIELD_TYPES[self.field_type][2], + self.printable, + self.field_offset) + +# class that handles an EXIF header +class EXIF_header: + def __init__(self, file, endian, offset, fake_exif, strict, debug=0): + self.file = file + self.endian = endian + self.offset = offset + self.fake_exif = fake_exif + self.strict = strict + 
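+        # The byte-order flag from the TIFF header: endian == 'I' (Intel)
+        # means values are little-endian, 'M' (Motorola) means big-endian;
+        # s2n() below dispatches to s2n_intel or s2n_motorola accordingly.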
self.debug = debug + self.tags = {} + + # convert slice to integer, based on sign and endian flags + # usually this offset is assumed to be relative to the beginning of the + # start of the EXIF information. For some cameras that use relative tags, + # this offset may be relative to some other starting point. + def s2n(self, offset, length, signed=0): + self.file.seek(self.offset+offset) + slice=self.file.read(length) + if self.endian == 'I': + val=s2n_intel(slice) + else: + val=s2n_motorola(slice) + # Sign extension ? + if signed: + msb=1L << (8*length-1) + if val & msb: + val=val-(msb << 1) + return val + + # convert offset to string + def n2s(self, offset, length): + s = '' + for dummy in range(length): + if self.endian == 'I': + s = s + chr(offset & 0xFF) + else: + s = chr(offset & 0xFF) + s + offset = offset >> 8 + return s + + # return first IFD + def first_IFD(self): + return self.s2n(4, 4) + + # return pointer to next IFD + def next_IFD(self, ifd): + entries=self.s2n(ifd, 2) + return self.s2n(ifd+2+12*entries, 4) + + # return list of IFDs in header + def list_IFDs(self): + i=self.first_IFD() + a=[] + while i: + a.append(i) + i=self.next_IFD(i) + return a + + # return list of entries in this IFD + def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS, relative=0, stop_tag='UNDEF'): + entries=self.s2n(ifd, 2) + for i in range(entries): + # entry is index of start of this IFD in the file + entry = ifd + 2 + 12 * i + tag = self.s2n(entry, 2) + + # get tag name early to avoid errors, help debug + tag_entry = dict.get(tag) + if tag_entry: + tag_name = tag_entry[0] + else: + tag_name = 'Tag 0x%04X' % tag + + # ignore certain tags for faster processing + if not (not detailed and tag in IGNORE_TAGS): + field_type = self.s2n(entry + 2, 2) + + # unknown field type + if not 0 < field_type < len(FIELD_TYPES): + if not self.strict: + continue + else: + raise ValueError('unknown type %d in tag 0x%04X' % (field_type, tag)) + + typelen = FIELD_TYPES[field_type][0] + count = self.s2n(entry + 4, 4) + # Adjust for tag id/type/count (2+2+4 bytes) + # Now we point at either the data or the 2nd level offset + offset = entry + 8 + + # If the value fits in 4 bytes, it is inlined, else we + # need to jump ahead again. + if count * typelen > 4: + # offset is not the value; it's a pointer to the value + # if relative we set things up so s2n will seek to the right + # place when it adds self.offset. Note that this 'relative' + # is for the Nikon type 3 makernote. Other cameras may use + # other relative offsets, which would have to be computed here + # slightly differently. + if relative: + tmp_offset = self.s2n(offset, 4) + offset = tmp_offset + ifd - 8 + if self.fake_exif: + offset = offset + 18 + else: + offset = self.s2n(offset, 4) + + field_offset = offset + if field_type == 2: + # special case: null-terminated ASCII string + # XXX investigate + # sometimes gets too big to fit in int value + if count != 0 and count < (2**31): + self.file.seek(self.offset + offset) + values = self.file.read(count) + #print values + # Drop any garbage after a null. 
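+                        # e.g. 'ASCII\x00garbage' -> 'ASCII'; field type 2
+                        # is a NUL-terminated ASCII string in the EXIF spec.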
+ values = values.split('\x00', 1)[0] + else: + values = '' + else: + values = [] + signed = (field_type in [6, 8, 9, 10]) + + # XXX investigate + # some entries get too big to handle could be malformed + # file or problem with self.s2n + if count < 1000: + for dummy in range(count): + if field_type in (5, 10): + # a ratio + value = Ratio(self.s2n(offset, 4, signed), + self.s2n(offset + 4, 4, signed)) + else: + value = self.s2n(offset, typelen, signed) + values.append(value) + offset = offset + typelen + # The test above causes problems with tags that are + # supposed to have long values! Fix up one important case. + elif tag_name == 'MakerNote' : + for dummy in range(count): + value = self.s2n(offset, typelen, signed) + values.append(value) + offset = offset + typelen + #else : + # print "Warning: dropping large tag:", tag, tag_name + + # now 'values' is either a string or an array + if count == 1 and field_type != 2: + printable=str(values[0]) + elif count > 50 and len(values) > 20 : + printable=str( values[0:20] )[0:-1] + ", ... ]" + else: + printable=str(values) + + # compute printable version of values + if tag_entry: + if len(tag_entry) != 1: + # optional 2nd tag element is present + if callable(tag_entry[1]): + # call mapping function + printable = tag_entry[1](values) + else: + printable = '' + for i in values: + # use lookup table for this tag + printable += tag_entry[1].get(i, repr(i)) + + self.tags[ifd_name + ' ' + tag_name] = IFD_Tag(printable, tag, + field_type, + values, field_offset, + count * typelen) + if self.debug: + print ' debug: %s: %s' % (tag_name, + repr(self.tags[ifd_name + ' ' + tag_name])) + + if tag_name == stop_tag: + break + + # extract uncompressed TIFF thumbnail (like pulling teeth) + # we take advantage of the pre-existing layout in the thumbnail IFD as + # much as possible + def extract_TIFF_thumbnail(self, thumb_ifd): + entries = self.s2n(thumb_ifd, 2) + # this is header plus offset to IFD ... + if self.endian == 'M': + tiff = 'MM\x00*\x00\x00\x00\x08' + else: + tiff = 'II*\x00\x08\x00\x00\x00' + # ... plus thumbnail IFD data plus a null "next IFD" pointer + self.file.seek(self.offset+thumb_ifd) + tiff += self.file.read(entries*12+2)+'\x00\x00\x00\x00' + + # fix up large value offset pointers into data area + for i in range(entries): + entry = thumb_ifd + 2 + 12 * i + tag = self.s2n(entry, 2) + field_type = self.s2n(entry+2, 2) + typelen = FIELD_TYPES[field_type][0] + count = self.s2n(entry+4, 4) + oldoff = self.s2n(entry+8, 4) + # start of the 4-byte pointer area in entry + ptr = i * 12 + 18 + # remember strip offsets location + if tag == 0x0111: + strip_off = ptr + strip_len = count * typelen + # is it in the data area? 
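+            # Each IFD entry is 12 bytes: tag (2) + type (2) + count (4) +
+            # value/offset (4). ptr = i*12 + 18 skips the 8-byte TIFF header
+            # and 2-byte entry count of the rebuilt 'tiff' blob plus the
+            # first 8 bytes of entry i, landing on its value/offset slot.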
+ if count * typelen > 4: + # update offset pointer (nasty "strings are immutable" crap) + # should be able to say "tiff[ptr:ptr+4]=newoff" + newoff = len(tiff) + tiff = tiff[:ptr] + self.n2s(newoff, 4) + tiff[ptr+4:] + # remember strip offsets location + if tag == 0x0111: + strip_off = newoff + strip_len = 4 + # get original data and store it + self.file.seek(self.offset + oldoff) + tiff += self.file.read(count * typelen) + + # add pixel strips and update strip offset info + old_offsets = self.tags['Thumbnail StripOffsets'].values + old_counts = self.tags['Thumbnail StripByteCounts'].values + for i in range(len(old_offsets)): + # update offset pointer (more nasty "strings are immutable" crap) + offset = self.n2s(len(tiff), strip_len) + tiff = tiff[:strip_off] + offset + tiff[strip_off + strip_len:] + strip_off += strip_len + # add pixel strip to end + self.file.seek(self.offset + old_offsets[i]) + tiff += self.file.read(old_counts[i]) + + self.tags['TIFFThumbnail'] = tiff + + # decode all the camera-specific MakerNote formats + + # Note is the data that comprises this MakerNote. The MakerNote will + # likely have pointers in it that point to other parts of the file. We'll + # use self.offset as the starting point for most of those pointers, since + # they are relative to the beginning of the file. + # + # If the MakerNote is in a newer format, it may use relative addressing + # within the MakerNote. In that case we'll use relative addresses for the + # pointers. + # + # As an aside: it's not just to be annoying that the manufacturers use + # relative offsets. It's so that if the makernote has to be moved by the + # picture software all of the offsets don't have to be adjusted. Overall, + # this is probably the right strategy for makernotes, though the spec is + # ambiguous. (The spec does not appear to imagine that makernotes would + # follow EXIF format internally. Once they did, it's ambiguous whether + # the offsets should be from the header at the start of all the EXIF info, + # or from the header at the start of the makernote.) + def decode_maker_note(self): + note = self.tags['EXIF MakerNote'] + + # Some apps use MakerNote tags but do not use a format for which we + # have a description, so just do a raw dump for these. + #if self.tags.has_key('Image Make'): + make = self.tags['Image Make'].printable + #else: + # make = '' + + # model = self.tags['Image Model'].printable # unused + + # Nikon + # The maker note usually starts with the word Nikon, followed by the + # type of the makernote (1 or 2, as a short). If the word Nikon is + # not at the start of the makernote, it's probably type 2, since some + # cameras work that way. + if 'NIKON' in make: + if note.values[0:7] == [78, 105, 107, 111, 110, 0, 1]: + if self.debug: + print "Looks like a type 1 Nikon MakerNote." 
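+                # note.values[0:6] == [78, 105, 107, 111, 110, 0] spells
+                # 'Nikon\x00'; the short that follows selects the MakerNote
+                # flavour (1 = old E99x table, 2 = TIFF-style IFD).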
+ self.dump_IFD(note.field_offset+8, 'MakerNote', + dict=MAKERNOTE_NIKON_OLDER_TAGS) + elif note.values[0:7] == [78, 105, 107, 111, 110, 0, 2]: + if self.debug: + print "Looks like a labeled type 2 Nikon MakerNote" + if note.values[12:14] != [0, 42] and note.values[12:14] != [42L, 0L]: + raise ValueError("Missing marker tag '42' in MakerNote.") + # skip the Makernote label and the TIFF header + self.dump_IFD(note.field_offset+10+8, 'MakerNote', + dict=MAKERNOTE_NIKON_NEWER_TAGS, relative=1) + else: + # E99x or D1 + if self.debug: + print "Looks like an unlabeled type 2 Nikon MakerNote" + self.dump_IFD(note.field_offset, 'MakerNote', + dict=MAKERNOTE_NIKON_NEWER_TAGS) + return + + # Olympus + if make.startswith('OLYMPUS'): + self.dump_IFD(note.field_offset+8, 'MakerNote', + dict=MAKERNOTE_OLYMPUS_TAGS) + # XXX TODO + #for i in (('MakerNote Tag 0x2020', MAKERNOTE_OLYMPUS_TAG_0x2020),): + # self.decode_olympus_tag(self.tags[i[0]].values, i[1]) + #return + + # Casio + if 'CASIO' in make or 'Casio' in make: + self.dump_IFD(note.field_offset, 'MakerNote', + dict=MAKERNOTE_CASIO_TAGS) + return + + # Fujifilm + if make == 'FUJIFILM': + # bug: everything else is "Motorola" endian, but the MakerNote + # is "Intel" endian + endian = self.endian + self.endian = 'I' + # bug: IFD offsets are from beginning of MakerNote, not + # beginning of file header + offset = self.offset + self.offset += note.field_offset + # process note with bogus values (note is actually at offset 12) + self.dump_IFD(12, 'MakerNote', dict=MAKERNOTE_FUJIFILM_TAGS) + # reset to correct values + self.endian = endian + self.offset = offset + return + + # Canon + if make == 'Canon': + self.dump_IFD(note.field_offset, 'MakerNote', + dict=MAKERNOTE_CANON_TAGS) + for i in (('MakerNote Tag 0x0001', MAKERNOTE_CANON_TAG_0x001), + ('MakerNote Tag 0x0004', MAKERNOTE_CANON_TAG_0x004)): + self.canon_decode_tag(self.tags[i[0]].values, i[1]) + return + + + # XXX TODO decode Olympus MakerNote tag based on offset within tag + def olympus_decode_tag(self, value, dict): + pass + + # decode Canon MakerNote tag based on offset within tag + # see http://www.burren.cx/david/canon.html by David Burren + def canon_decode_tag(self, value, dict): + for i in range(1, len(value)): + x=dict.get(i, ('Unknown', )) + if self.debug: + print i, x + name=x[0] + if len(x) > 1: + val=x[1].get(value[i], 'Unknown') + else: + val=value[i] + # it's not a real IFD Tag but we fake one to make everybody + # happy. this will have a "proprietary" type + self.tags['MakerNote '+name]=IFD_Tag(str(val), None, 0, None, + None, None) + +# process an image file (expects an open file object) +# this is the function that has to deal with all the arbitrary nasty bits +# of the EXIF standard +def process_file(f, stop_tag='UNDEF', details=True, strict=False, debug=False): + # yah it's cheesy... 
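+    # Typical library usage, as a minimal sketch ('photo.jpg' is a
+    # placeholder):
+    #
+    #   f = open('photo.jpg', 'rb')
+    #   tags = process_file(f)
+    #   print tags.get('Image Make'), tags.get('EXIF DateTimeOriginal')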
+ global detailed + detailed = details + + # by default do not fake an EXIF beginning + fake_exif = 0 + + # determine whether it's a JPEG or TIFF + data = f.read(12) + if data[0:4] in ['II*\x00', 'MM\x00*']: + # it's a TIFF file + f.seek(0) + endian = f.read(1) + f.read(1) + offset = 0 + elif data[0:2] == '\xFF\xD8': + # it's a JPEG file + while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM', 'Phot'): + length = ord(data[4])*256+ord(data[5]) + f.read(length-8) + # fake an EXIF beginning of file + data = '\xFF\x00'+f.read(10) + fake_exif = 1 + if data[2] == '\xFF' and data[6:10] == 'Exif': + # detected EXIF header + offset = f.tell() + endian = f.read(1) + else: + # no EXIF information + return {} + else: + # file format not recognized + return {} + + # deal with the EXIF info we found + if debug: + print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format' + hdr = EXIF_header(f, endian, offset, fake_exif, strict, debug) + ifd_list = hdr.list_IFDs() + ctr = 0 + for i in ifd_list: + if ctr == 0: + IFD_name = 'Image' + elif ctr == 1: + IFD_name = 'Thumbnail' + thumb_ifd = i + else: + IFD_name = 'IFD %d' % ctr + if debug: + print ' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i) + hdr.dump_IFD(i, IFD_name, stop_tag=stop_tag) + # EXIF IFD + exif_off = hdr.tags.get(IFD_name+' ExifOffset') + if exif_off: + if debug: + print ' EXIF SubIFD at offset %d:' % exif_off.values[0] + hdr.dump_IFD(exif_off.values[0], 'EXIF', stop_tag=stop_tag) + # Interoperability IFD contained in EXIF IFD + intr_off = hdr.tags.get('EXIF SubIFD InteroperabilityOffset') + if intr_off: + if debug: + print ' EXIF Interoperability SubSubIFD at offset %d:' \ + % intr_off.values[0] + hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability', + dict=INTR_TAGS, stop_tag=stop_tag) + # GPS IFD + gps_off = hdr.tags.get(IFD_name+' GPSInfo') + if gps_off: + if debug: + print ' GPS SubIFD at offset %d:' % gps_off.values[0] + hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS, stop_tag=stop_tag) + ctr += 1 + + # extract uncompressed TIFF thumbnail + thumb = hdr.tags.get('Thumbnail Compression') + if thumb and thumb.printable == 'Uncompressed TIFF': + hdr.extract_TIFF_thumbnail(thumb_ifd) + + # JPEG thumbnail (thankfully the JPEG data is stored as a unit) + thumb_off = hdr.tags.get('Thumbnail JPEGInterchangeFormat') + if thumb_off: + f.seek(offset+thumb_off.values[0]) + size = hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0] + hdr.tags['JPEGThumbnail'] = f.read(size) + + # deal with MakerNote contained in EXIF IFD + # (Some apps use MakerNote tags but do not use a format for which we + # have a description, do not process these). 
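+    # MakerNote decoding also requires 'Image Make' (to pick the vendor
+    # table) and is skipped entirely in quick mode (details=False).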
+    if 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags and detailed:
+        hdr.decode_maker_note()
+
+    # Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote
+    # since it's not allowed in an uncompressed TIFF IFD
+    if 'JPEGThumbnail' not in hdr.tags:
+        thumb_off=hdr.tags.get('MakerNote JPEGThumbnail')
+        if thumb_off:
+            f.seek(offset+thumb_off.values[0])
+            hdr.tags['JPEGThumbnail'] = f.read(thumb_off.field_length)
+
+    return hdr.tags
+
+
+# show command line usage
+def usage(exit_status):
+    msg = 'Usage: EXIF.py [OPTIONS] file1 [file2 ...]\n'
+    msg += 'Extract EXIF information from digital camera image files.\n\nOptions:\n'
+    msg += '-q --quick          Do not process MakerNotes.\n'
+    msg += '-t TAG --stop-tag TAG   Stop processing when this tag is retrieved.\n'
+    msg += '-s --strict         Run in strict mode (stop on errors).\n'
+    msg += '-d --debug          Run in debug mode (display extra info).\n'
+    print msg
+    sys.exit(exit_status)
+
+# library test/debug function (dump given files)
+if __name__ == '__main__':
+    import sys
+    import getopt
+
+    # parse command line options/arguments
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "hqsdt:v", ["help", "quick", "strict", "debug", "stop-tag="])
+    except getopt.GetoptError:
+        usage(2)
+    if args == []:
+        usage(2)
+    detailed = True
+    stop_tag = 'UNDEF'
+    debug = False
+    strict = False
+    for o, a in opts:
+        if o in ("-h", "--help"):
+            usage(0)
+        if o in ("-q", "--quick"):
+            detailed = False
+        if o in ("-t", "--stop-tag"):
+            stop_tag = a
+        if o in ("-s", "--strict"):
+            strict = True
+        if o in ("-d", "--debug"):
+            debug = True
+
+    # output info for each file
+    for filename in args:
+        try:
+            file=open(filename, 'rb')
+        except IOError:
+            print "'%s' is unreadable\n"%filename
+            continue
+        print filename + ':'
+        # get the tags
+        data = process_file(file, stop_tag=stop_tag, details=detailed, strict=strict, debug=debug)
+        if not data:
+            print 'No EXIF information found'
+            continue
+
+        x=data.keys()
+        x.sort()
+        for i in x:
+            if i in ('JPEGThumbnail', 'TIFFThumbnail'):
+                continue
+            try:
+                print '   %s (%s): %s' % \
+                      (i, FIELD_TYPES[data[i].field_type][2], data[i].printable)
+            except:
+                print 'error', i, '"', data[i], '"'
+        if 'JPEGThumbnail' in data:
+            print 'File has JPEG thumbnail'
+        print
+
diff --git a/image_filer/utils/__init__.py b/image_filer/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/image_filer/utils/files.py b/image_filer/utils/files.py
new file mode 100644
index 0000000..8f730c9
--- /dev/null
+++ b/image_filer/utils/files.py
@@ -0,0 +1,21 @@
+import os
+
+from image_filer.utils.zip import unzip
+
+def generic_handle_file(file, original_filename):
+    """
+    Handles a file, regardless of whether it is a package or a single file,
+    and returns a list of files. Can recursively unpack packages.
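+
+    Each element is a (file, original_filename) tuple. A sketch of the
+    intended use ('upload.zip' and save() are placeholders, not part of
+    this module):
+
+        for f, name in generic_handle_file(uploaded, 'upload.zip'):
+            save(f, name)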
+ """ + print "entering generic_handle_file(file=%s, original_filename=%s)" % (file, original_filename) + files = [] + filetype = os.path.splitext(original_filename)[1].lower() + print filetype + if filetype=='.zip': + unpacked_files = unzip(file) + for ufile, ufilename in unpacked_files: + files += generic_handle_file(ufile, ufilename) + else: + files.append( (file,original_filename) ) + print "result of generic_handle_file: ", files + return files \ No newline at end of file diff --git a/image_filer/utils/zip.py b/image_filer/utils/zip.py new file mode 100644 index 0000000..6d36e84 --- /dev/null +++ b/image_filer/utils/zip.py @@ -0,0 +1,27 @@ +import os +#import zipfile +# zipfile.open() is only available in Python 2.6, so we use the future version +from django.core.files.uploadedfile import SimpleUploadedFile +from image_filer.utils import zipfile + +def unzip(file): + """ + Take a path to a zipfile and checks if it is a valid zip file + and returns... + """ + files = [] + # TODO: implement try-except here + zip = zipfile.ZipFile(file) + bad_file = zip.testzip() + if bad_file: + raise Exception('"%s" in the .zip archive is corrupt.' % bad_file) + infolist = zip.infolist() + print infolist + for zipinfo in infolist: + print "handling %s" % zipinfo.filename + if zipinfo.filename.startswith('__'): # do not process meta files + continue + thefile = SimpleUploadedFile(name=zipinfo.filename, content=zip.read(zipinfo)) + files.append( (thefile, zipinfo.filename) ) + zip.close() + return files diff --git a/image_filer/utils/zipfile.py b/image_filer/utils/zipfile.py new file mode 100644 index 0000000..99fb7b4 --- /dev/null +++ b/image_filer/utils/zipfile.py @@ -0,0 +1,1418 @@ +""" +Read and write ZIP files. +""" +# hack so this file that is actually part of python 2.6 works in older versions +from __future__ import with_statement + +import struct, os, time, sys, shutil +import binascii, cStringIO, stat + +try: + import zlib # We may need its compression method + crc32 = zlib.crc32 +except ImportError: + zlib = None + crc32 = binascii.crc32 + +__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile", + "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ] + +class BadZipfile(Exception): + pass + + +class LargeZipFile(Exception): + """ + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. + """ + +error = BadZipfile # The exception raised by this module + +ZIP64_LIMIT = (1 << 31) - 1 +ZIP_FILECOUNT_LIMIT = 1 << 16 +ZIP_MAX_COMMENT = (1 << 16) - 1 + +# constants for Zip file compression methods +ZIP_STORED = 0 +ZIP_DEFLATED = 8 +# Other ZIP compression methods not supported + +# Below are some formats and associated data for reading/writing headers using +# the struct module. 
The names and structures of headers/records are those used +# in the PKWARE description of the ZIP file format: +# http://www.pkware.com/documents/casestudies/APPNOTE.TXT +# (URL valid as of January 2008) + +# The "end of central directory" structure, magic number, size, and indices +# (section V.I in the format document) +structEndArchive = "<4s4H2LH" +stringEndArchive = "PK\005\006" +sizeEndCentDir = struct.calcsize(structEndArchive) + +_ECD_SIGNATURE = 0 +_ECD_DISK_NUMBER = 1 +_ECD_DISK_START = 2 +_ECD_ENTRIES_THIS_DISK = 3 +_ECD_ENTRIES_TOTAL = 4 +_ECD_SIZE = 5 +_ECD_OFFSET = 6 +_ECD_COMMENT_SIZE = 7 +# These last two indices are not part of the structure as defined in the +# spec, but they are used internally by this module as a convenience +_ECD_COMMENT = 8 +_ECD_LOCATION = 9 + +# The "central directory" structure, magic number, size, and indices +# of entries in the structure (section V.F in the format document) +structCentralDir = "<4s4B4HL2L5H2L" +stringCentralDir = "PK\001\002" +sizeCentralDir = struct.calcsize(structCentralDir) + +# indexes of entries in the central directory structure +_CD_SIGNATURE = 0 +_CD_CREATE_VERSION = 1 +_CD_CREATE_SYSTEM = 2 +_CD_EXTRACT_VERSION = 3 +_CD_EXTRACT_SYSTEM = 4 +_CD_FLAG_BITS = 5 +_CD_COMPRESS_TYPE = 6 +_CD_TIME = 7 +_CD_DATE = 8 +_CD_CRC = 9 +_CD_COMPRESSED_SIZE = 10 +_CD_UNCOMPRESSED_SIZE = 11 +_CD_FILENAME_LENGTH = 12 +_CD_EXTRA_FIELD_LENGTH = 13 +_CD_COMMENT_LENGTH = 14 +_CD_DISK_NUMBER_START = 15 +_CD_INTERNAL_FILE_ATTRIBUTES = 16 +_CD_EXTERNAL_FILE_ATTRIBUTES = 17 +_CD_LOCAL_HEADER_OFFSET = 18 + +# The "local file header" structure, magic number, size, and indices +# (section V.A in the format document) +structFileHeader = "<4s2B4HL2L2H" +stringFileHeader = "PK\003\004" +sizeFileHeader = struct.calcsize(structFileHeader) + +_FH_SIGNATURE = 0 +_FH_EXTRACT_VERSION = 1 +_FH_EXTRACT_SYSTEM = 2 +_FH_GENERAL_PURPOSE_FLAG_BITS = 3 +_FH_COMPRESSION_METHOD = 4 +_FH_LAST_MOD_TIME = 5 +_FH_LAST_MOD_DATE = 6 +_FH_CRC = 7 +_FH_COMPRESSED_SIZE = 8 +_FH_UNCOMPRESSED_SIZE = 9 +_FH_FILENAME_LENGTH = 10 +_FH_EXTRA_FIELD_LENGTH = 11 + +# The "Zip64 end of central directory locator" structure, magic number, and size +structEndArchive64Locator = "<4sLQL" +stringEndArchive64Locator = "PK\x06\x07" +sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator) + +# The "Zip64 end of central directory" record, magic number, size, and indices +# (section V.G in the format document) +structEndArchive64 = "<4sQ2H2L4Q" +stringEndArchive64 = "PK\x06\x06" +sizeEndCentDir64 = struct.calcsize(structEndArchive64) + +_CD64_SIGNATURE = 0 +_CD64_DIRECTORY_RECSIZE = 1 +_CD64_CREATE_VERSION = 2 +_CD64_EXTRACT_VERSION = 3 +_CD64_DISK_NUMBER = 4 +_CD64_DISK_NUMBER_START = 5 +_CD64_NUMBER_ENTRIES_THIS_DISK = 6 +_CD64_NUMBER_ENTRIES_TOTAL = 7 +_CD64_DIRECTORY_SIZE = 8 +_CD64_OFFSET_START_CENTDIR = 9 + +def _check_zipfile(fp): + try: + if _EndRecData(fp): + return True # file has correct magic number + except IOError: + pass + return False + +def is_zipfile(filename): + """Quickly see if a file is a ZIP file by checking the magic number. + + The filename argument may be a file or file-like object too. 
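+
+    A sketch of the intended use ('upload.zip' is a placeholder):
+
+        if is_zipfile('upload.zip'):
+            names = ZipFile('upload.zip').namelist()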
+ """ + result = False + try: + if hasattr(filename, "read"): + result = _check_zipfile(fp=filename) + else: + with open(filename, "rb") as fp: + result = _check_zipfile(fp) + except IOError: + pass + return result + +def _EndRecData64(fpin, offset, endrec): + """ + Read the ZIP64 end-of-archive records and use that to update endrec + """ + fpin.seek(offset - sizeEndCentDir64Locator, 2) + data = fpin.read(sizeEndCentDir64Locator) + sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data) + if sig != stringEndArchive64Locator: + return endrec + + if diskno != 0 or disks != 1: + raise BadZipfile("zipfiles that span multiple disks are not supported") + + # Assume no 'zip64 extensible data' + fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2) + data = fpin.read(sizeEndCentDir64) + sig, sz, create_version, read_version, disk_num, disk_dir, \ + dircount, dircount2, dirsize, diroffset = \ + struct.unpack(structEndArchive64, data) + if sig != stringEndArchive64: + return endrec + + # Update the original endrec using data from the ZIP64 record + endrec[_ECD_SIGNATURE] = sig + endrec[_ECD_DISK_NUMBER] = disk_num + endrec[_ECD_DISK_START] = disk_dir + endrec[_ECD_ENTRIES_THIS_DISK] = dircount + endrec[_ECD_ENTRIES_TOTAL] = dircount2 + endrec[_ECD_SIZE] = dirsize + endrec[_ECD_OFFSET] = diroffset + return endrec + + +def _EndRecData(fpin): + """Return data from the "End of Central Directory" record, or None. + + The data is a list of the nine items in the ZIP "End of central dir" + record followed by a tenth item, the file seek offset of this record.""" + + # Determine file size + fpin.seek(0, 2) + filesize = fpin.tell() + + # Check to see if this is ZIP file with no archive comment (the + # "end of central directory" structure should be the last item in the + # file if this is the case). + fpin.seek(-sizeEndCentDir, 2) + data = fpin.read() + if data[0:4] == stringEndArchive and data[-2:] == "\000\000": + # the signature is correct and there's no comment, unpack structure + endrec = struct.unpack(structEndArchive, data) + endrec=list(endrec) + + # Append a blank comment and record start offset + endrec.append("") + endrec.append(filesize - sizeEndCentDir) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, -sizeEndCentDir, endrec) + + # Either this is not a ZIP file, or it is a ZIP file with an archive + # comment. Search the end of the file for the "end of central directory" + # record signature. The comment is the last item in the ZIP file and may be + # up to 64K long. It is assumed that the "end of central directory" magic + # number does not appear in the comment. 
+ maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0) + fpin.seek(maxCommentStart, 0) + data = fpin.read() + start = data.rfind(stringEndArchive) + if start >= 0: + # found the magic number; attempt to unpack and interpret + recData = data[start:start+sizeEndCentDir] + endrec = list(struct.unpack(structEndArchive, recData)) + comment = data[start+sizeEndCentDir:] + # check that comment length is correct + if endrec[_ECD_COMMENT_SIZE] == len(comment): + # Append the archive comment and start offset + endrec.append(comment) + endrec.append(maxCommentStart + start) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, maxCommentStart + start - filesize, + endrec) + + # Unable to find a valid end of central directory structure + return + + +class ZipInfo (object): + """Class with attributes describing each file in the ZIP archive.""" + + __slots__ = ( + 'orig_filename', + 'filename', + 'date_time', + 'compress_type', + 'comment', + 'extra', + 'create_system', + 'create_version', + 'extract_version', + 'reserved', + 'flag_bits', + 'volume', + 'internal_attr', + 'external_attr', + 'header_offset', + 'CRC', + 'compress_size', + 'file_size', + '_raw_time', + ) + + def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): + self.orig_filename = filename # Original file name in archive + + # Terminate the file name at the first null byte. Null bytes in file + # names are used as tricks by viruses in archives. + null_byte = filename.find(chr(0)) + if null_byte >= 0: + filename = filename[0:null_byte] + # This is used to ensure paths in generated ZIP files always use + # forward slashes as the directory separator, as required by the + # ZIP format specification. + if os.sep != "/" and os.sep in filename: + filename = filename.replace(os.sep, "/") + + self.filename = filename # Normalized file name + self.date_time = date_time # year, month, day, hour, min, sec + # Standard values: + self.compress_type = ZIP_STORED # Type of compression for the file + self.comment = "" # Comment for each file + self.extra = "" # ZIP extra data + if sys.platform == 'win32': + self.create_system = 0 # System which created ZIP archive + else: + # Assume everything else is unix-y + self.create_system = 3 # System which created ZIP archive + self.create_version = 20 # Version which created ZIP archive + self.extract_version = 20 # Version needed to extract archive + self.reserved = 0 # Must be zero + self.flag_bits = 0 # ZIP flag bits + self.volume = 0 # Volume number of file header + self.internal_attr = 0 # Internal attributes + self.external_attr = 0 # External file attributes + # Other attributes are set by class ZipFile: + # header_offset Byte offset to the file header + # CRC CRC-32 of the uncompressed file + # compress_size Size of the compressed file + # file_size Size of the uncompressed file + + def FileHeader(self): + """Return the per-file header as a string.""" + dt = self.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + if self.flag_bits & 0x08: + # Set these to zero because we write them after the file data + CRC = compress_size = file_size = 0 + else: + CRC = self.CRC + compress_size = self.compress_size + file_size = self.file_size + + extra = self.extra + + if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT: + # File is larger than what fits into a 4 byte integer, + # fall back to the ZIP64 extension + fmt = '= 24: + counts = unpack('> 1) & 0x7FFFFFFF) ^ poly + else: + crc 
= ((crc >> 1) & 0x7FFFFFFF) + table[i] = crc + return table + crctable = _GenerateCRCTable() + + def _crc32(self, ch, crc): + """Compute the CRC32 primitive on one byte.""" + return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff] + + def __init__(self, pwd): + self.key0 = 305419896 + self.key1 = 591751049 + self.key2 = 878082192 + for p in pwd: + self._UpdateKeys(p) + + def _UpdateKeys(self, c): + self.key0 = self._crc32(c, self.key0) + self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295 + self.key1 = (self.key1 * 134775813 + 1) & 4294967295 + self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2) + + def __call__(self, c): + """Decrypt a single character.""" + c = ord(c) + k = self.key2 | 2 + c = c ^ (((k * (k^1)) >> 8) & 255) + c = chr(c) + self._UpdateKeys(c) + return c + +class ZipExtFile: + """File-like object for reading an archive member. + Is returned by ZipFile.open(). + """ + + def __init__(self, fileobj, zipinfo, decrypt=None): + self.fileobj = fileobj + self.decrypter = decrypt + self.bytes_read = 0L + self.rawbuffer = '' + self.readbuffer = '' + self.linebuffer = '' + self.eof = False + self.univ_newlines = False + self.nlSeps = ("\n", ) + self.lastdiscard = '' + + self.compress_type = zipinfo.compress_type + self.compress_size = zipinfo.compress_size + + self.closed = False + self.mode = "r" + self.name = zipinfo.filename + + # read from compressed files in 64k blocks + self.compreadsize = 64*1024 + if self.compress_type == ZIP_DEFLATED: + self.dc = zlib.decompressobj(-15) + + def set_univ_newlines(self, univ_newlines): + self.univ_newlines = univ_newlines + + # pick line separator char(s) based on universal newlines flag + self.nlSeps = ("\n", ) + if self.univ_newlines: + self.nlSeps = ("\r\n", "\r", "\n") + + def __iter__(self): + return self + + def next(self): + nextline = self.readline() + if not nextline: + raise StopIteration() + + return nextline + + def close(self): + self.closed = True + + def _checkfornewline(self): + nl, nllen = -1, -1 + if self.linebuffer: + # ugly check for cases where half of an \r\n pair was + # read on the last pass, and the \r was discarded. In this + # case we just throw away the \n at the start of the buffer. + if (self.lastdiscard, self.linebuffer[0]) == ('\r','\n'): + self.linebuffer = self.linebuffer[1:] + + for sep in self.nlSeps: + nl = self.linebuffer.find(sep) + if nl >= 0: + nllen = len(sep) + return nl, nllen + + return nl, nllen + + def readline(self, size = -1): + """Read a line with approx. size. If size is negative, + read a whole line. 
+ """ + if size < 0: + size = sys.maxint + elif size == 0: + return '' + + # check for a newline already in buffer + nl, nllen = self._checkfornewline() + + if nl >= 0: + # the next line was already in the buffer + nl = min(nl, size) + else: + # no line break in buffer - try to read more + size -= len(self.linebuffer) + while nl < 0 and size > 0: + buf = self.read(min(size, 100)) + if not buf: + break + self.linebuffer += buf + size -= len(buf) + + # check for a newline in buffer + nl, nllen = self._checkfornewline() + + # we either ran out of bytes in the file, or + # met the specified size limit without finding a newline, + # so return current buffer + if nl < 0: + s = self.linebuffer + self.linebuffer = '' + return s + + buf = self.linebuffer[:nl] + self.lastdiscard = self.linebuffer[nl:nl + nllen] + self.linebuffer = self.linebuffer[nl + nllen:] + + # line is always returned with \n as newline char (except possibly + # for a final incomplete line in the file, which is handled above). + return buf + "\n" + + def readlines(self, sizehint = -1): + """Return a list with all (following) lines. The sizehint parameter + is ignored in this implementation. + """ + result = [] + while True: + line = self.readline() + if not line: break + result.append(line) + return result + + def read(self, size = None): + # act like file() obj and return empty string if size is 0 + if size == 0: + return '' + + # determine read size + bytesToRead = self.compress_size - self.bytes_read + + # adjust read size for encrypted files since the first 12 bytes + # are for the encryption/password information + if self.decrypter is not None: + bytesToRead -= 12 + + if size is not None and size >= 0: + if self.compress_type == ZIP_STORED: + lr = len(self.readbuffer) + bytesToRead = min(bytesToRead, size - lr) + elif self.compress_type == ZIP_DEFLATED: + if len(self.readbuffer) > size: + # the user has requested fewer bytes than we've already + # pulled through the decompressor; don't read any more + bytesToRead = 0 + else: + # user will use up the buffer, so read some more + lr = len(self.rawbuffer) + bytesToRead = min(bytesToRead, self.compreadsize - lr) + + # avoid reading past end of file contents + if bytesToRead + self.bytes_read > self.compress_size: + bytesToRead = self.compress_size - self.bytes_read + + # try to read from file (if necessary) + if bytesToRead > 0: + bytes = self.fileobj.read(bytesToRead) + self.bytes_read += len(bytes) + self.rawbuffer += bytes + + # handle contents of raw buffer + if self.rawbuffer: + newdata = self.rawbuffer + self.rawbuffer = '' + + # decrypt new data if we were given an object to handle that + if newdata and self.decrypter is not None: + newdata = ''.join(map(self.decrypter, newdata)) + + # decompress newly read data if necessary + if newdata and self.compress_type == ZIP_DEFLATED: + newdata = self.dc.decompress(newdata) + self.rawbuffer = self.dc.unconsumed_tail + if self.eof and len(self.rawbuffer) == 0: + # we're out of raw bytes (both from the file and + # the local buffer); flush just to make sure the + # decompressor is done + newdata += self.dc.flush() + # prevent decompressor from being used again + self.dc = None + + self.readbuffer += newdata + + + # return what the user asked for + if size is None or len(self.readbuffer) <= size: + bytes = self.readbuffer + self.readbuffer = '' + else: + bytes = self.readbuffer[:size] + self.readbuffer = self.readbuffer[size:] + + return bytes + + +class ZipFile: + """ Class with methods to open, read, write, close, list zip files. 
+ + z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False) + + file: Either the path to the file, or a file-like object. + If it is a path, the file will be opened and closed by ZipFile. + mode: The mode can be either read "r", write "w" or append "a". + compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib). + allowZip64: if True ZipFile will create files with ZIP64 extensions when + needed, otherwise it will raise an exception when this would + be necessary. + + """ + + fp = None # Set here since __del__ checks it + + def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False): + """Open the ZIP file with mode read "r", write "w" or append "a".""" + if mode not in ("r", "w", "a"): + raise RuntimeError('ZipFile() requires mode "r", "w", or "a"') + + if compression == ZIP_STORED: + pass + elif compression == ZIP_DEFLATED: + if not zlib: + raise RuntimeError,\ + "Compression requires the (missing) zlib module" + else: + raise RuntimeError, "That compression method is not supported" + + self._allowZip64 = allowZip64 + self._didModify = False + self.debug = 0 # Level of printing: 0 through 3 + self.NameToInfo = {} # Find file info given name + self.filelist = [] # List of ZipInfo instances for archive + self.compression = compression # Method of compression + self.mode = key = mode.replace('b', '')[0] + self.pwd = None + self.comment = '' + + # Check if we were passed a file-like object + if isinstance(file, basestring): + self._filePassed = 0 + self.filename = file + modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'} + try: + self.fp = open(file, modeDict[mode]) + except IOError: + if mode == 'a': + mode = key = 'w' + self.fp = open(file, modeDict[mode]) + else: + raise + else: + self._filePassed = 1 + self.fp = file + self.filename = getattr(file, 'name', None) + + if key == 'r': + self._GetContents() + elif key == 'w': + pass + elif key == 'a': + try: # See if file is a zip file + self._RealGetContents() + # seek to start of directory and overwrite + self.fp.seek(self.start_dir, 0) + except BadZipfile: # file is not a zip file, just append + self.fp.seek(0, 2) + else: + if not self._filePassed: + self.fp.close() + self.fp = None + raise RuntimeError, 'Mode must be "r", "w" or "a"' + + def _GetContents(self): + """Read the directory, making sure we close the file if the format + is bad.""" + try: + self._RealGetContents() + except BadZipfile: + if not self._filePassed: + self.fp.close() + self.fp = None + raise + + def _RealGetContents(self): + """Read in the table of contents for the ZIP file.""" + fp = self.fp + endrec = _EndRecData(fp) + if not endrec: + raise BadZipfile, "File is not a zip file" + if self.debug > 1: + print endrec + size_cd = endrec[_ECD_SIZE] # bytes in central directory + offset_cd = endrec[_ECD_OFFSET] # offset of central directory + self.comment = endrec[_ECD_COMMENT] # archive comment + + # "concat" is zero, unless zip was concatenated to another file + concat = endrec[_ECD_LOCATION] - size_cd - offset_cd + if endrec[_ECD_SIGNATURE] == stringEndArchive64: + # If Zip64 extension structures are present, account for them + concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) + + if self.debug > 2: + inferred = concat + offset_cd + print "given, inferred, offset", offset_cd, inferred, concat + # self.start_dir: Position of start of central directory + self.start_dir = offset_cd + concat + fp.seek(self.start_dir, 0) + data = fp.read(size_cd) + fp = cStringIO.StringIO(data) + total = 0 + while total < size_cd: + centdir = 
fp.read(sizeCentralDir) + if centdir[0:4] != stringCentralDir: + raise BadZipfile, "Bad magic number for central directory" + centdir = struct.unpack(structCentralDir, centdir) + if self.debug > 2: + print centdir + filename = fp.read(centdir[_CD_FILENAME_LENGTH]) + # Create ZipInfo instance to store file information + x = ZipInfo(filename) + x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) + x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) + x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + (x.create_version, x.create_system, x.extract_version, x.reserved, + x.flag_bits, x.compress_type, t, d, + x.CRC, x.compress_size, x.file_size) = centdir[1:12] + x.volume, x.internal_attr, x.external_attr = centdir[15:18] + # Convert date/time code to (year, month, day, hour, min, sec) + x._raw_time = t + x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, + t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) + + x._decodeExtra() + x.header_offset = x.header_offset + concat + x.filename = x._decodeFilename() + self.filelist.append(x) + self.NameToInfo[x.filename] = x + + # update total bytes read from central directory + total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + + centdir[_CD_EXTRA_FIELD_LENGTH] + + centdir[_CD_COMMENT_LENGTH]) + + if self.debug > 2: + print "total", total + + + def namelist(self): + """Return a list of file names in the archive.""" + l = [] + for data in self.filelist: + l.append(data.filename) + return l + + def infolist(self): + """Return a list of class ZipInfo instances for files in the + archive.""" + return self.filelist + + def printdir(self): + """Print a table of contents for the zip file.""" + print "%-46s %19s %12s" % ("File Name", "Modified ", "Size") + for zinfo in self.filelist: + date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] + print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size) + + def testzip(self): + """Read all the files and check the CRC.""" + chunk_size = 2 ** 20 + for zinfo in self.filelist: + try: + # Read by chunks, to avoid an OverflowError or a + # MemoryError with very large embedded files. 
+ f = self.open(zinfo.filename, "r") + while f.read(chunk_size): # Check CRC-32 + pass + except BadZipfile: + return zinfo.filename + + def getinfo(self, name): + """Return the instance of ZipInfo given 'name'.""" + info = self.NameToInfo.get(name) + if info is None: + raise KeyError( + 'There is no item named %r in the archive' % name) + + return info + + def setpassword(self, pwd): + """Set default password for encrypted files.""" + self.pwd = pwd + + def read(self, name, pwd=None): + """Return file bytes (as a string) for name.""" + return self.open(name, "r", pwd).read() + + def open(self, name, mode="r", pwd=None): + """Return file-like object for 'name'.""" + if mode not in ("r", "U", "rU"): + raise RuntimeError, 'open() requires mode "r", "U", or "rU"' + if not self.fp: + raise RuntimeError, \ + "Attempt to read ZIP archive that was already closed" + + # Only open a new file for instances where we were not + # given a file object in the constructor + if self._filePassed: + zef_file = self.fp + else: + zef_file = open(self.filename, 'rb') + + # Make sure we have an info object + if isinstance(name, ZipInfo): + # 'name' is already an info object + zinfo = name + else: + # Get info object for name + zinfo = self.getinfo(name) + + zef_file.seek(zinfo.header_offset, 0) + + # Skip the file header: + fheader = zef_file.read(sizeFileHeader) + if fheader[0:4] != stringFileHeader: + raise BadZipfile, "Bad magic number for file header" + + fheader = struct.unpack(structFileHeader, fheader) + fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) + if fheader[_FH_EXTRA_FIELD_LENGTH]: + zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH]) + + if fname != zinfo.orig_filename: + raise BadZipfile, \ + 'File name in directory "%s" and header "%s" differ.' % ( + zinfo.orig_filename, fname) + + # check for encrypted flag & handle password + is_encrypted = zinfo.flag_bits & 0x1 + zd = None + if is_encrypted: + if not pwd: + pwd = self.pwd + if not pwd: + raise RuntimeError, "File %s is encrypted, " \ + "password required for extraction" % name + + zd = _ZipDecrypter(pwd) + # The first 12 bytes in the cypher stream is an encryption header + # used to strengthen the algorithm. The first 11 bytes are + # completely random, while the 12th contains the MSB of the CRC, + # or the MSB of the file time depending on the header type + # and is used to check the correctness of the password. + bytes = zef_file.read(12) + h = map(zd, bytes[0:12]) + if zinfo.flag_bits & 0x8: + # compare against the file type from extended local headers + check_byte = (zinfo._raw_time >> 8) & 0xff + else: + # compare against the CRC otherwise + check_byte = (zinfo.CRC >> 24) & 0xff + if ord(h[11]) != check_byte: + raise RuntimeError("Bad password for file", name) + + # build and return a ZipExtFile + if zd is None: + zef = ZipExtFile(zef_file, zinfo) + else: + zef = ZipExtFile(zef_file, zinfo, zd) + + # set universal newlines on ZipExtFile if necessary + if "U" in mode: + zef.set_univ_newlines(True) + return zef + + def extract(self, member, path=None, pwd=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a ZipInfo object. You can + specify a different directory using `path'. 
+ """ + if not isinstance(member, ZipInfo): + member = self.getinfo(member) + + if path is None: + path = os.getcwd() + + return self._extract_member(member, path, pwd) + + def extractall(self, path=None, members=None, pwd=None): + """Extract all members from the archive to the current working + directory. `path' specifies a different directory to extract to. + `members' is optional and must be a subset of the list returned + by namelist(). + """ + if members is None: + members = self.namelist() + + for zipinfo in members: + self.extract(zipinfo, path, pwd) + + def _extract_member(self, member, targetpath, pwd): + """Extract the ZipInfo object 'member' to a physical + file on the path targetpath. + """ + # build the destination pathname, replacing + # forward slashes to platform specific separators. + if targetpath[-1:] in (os.path.sep, os.path.altsep): + targetpath = targetpath[:-1] + + # don't include leading "/" from file name if present + if member.filename[0] == '/': + targetpath = os.path.join(targetpath, member.filename[1:]) + else: + targetpath = os.path.join(targetpath, member.filename) + + targetpath = os.path.normpath(targetpath) + + # Create all upper directories if necessary. + upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + os.makedirs(upperdirs) + + if member.filename[-1] == '/': + os.mkdir(targetpath) + return targetpath + + source = self.open(member, pwd=pwd) + target = file(targetpath, "wb") + shutil.copyfileobj(source, target) + source.close() + target.close() + + return targetpath + + def _writecheck(self, zinfo): + """Check for errors before writing a file to the archive.""" + if zinfo.filename in self.NameToInfo: + if self.debug: # Warning for duplicate names + print "Duplicate name:", zinfo.filename + if self.mode not in ("w", "a"): + raise RuntimeError, 'write() requires mode "w" or "a"' + if not self.fp: + raise RuntimeError, \ + "Attempt to write ZIP archive that was already closed" + if zinfo.compress_type == ZIP_DEFLATED and not zlib: + raise RuntimeError, \ + "Compression requires the (missing) zlib module" + if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED): + raise RuntimeError, \ + "That compression method is not supported" + if zinfo.file_size > ZIP64_LIMIT: + if not self._allowZip64: + raise LargeZipFile("Filesize would require ZIP64 extensions") + if zinfo.header_offset > ZIP64_LIMIT: + if not self._allowZip64: + raise LargeZipFile("Zipfile size would require ZIP64 extensions") + + def write(self, filename, arcname=None, compress_type=None): + """Put the bytes from filename into the archive under the name + arcname.""" + if not self.fp: + raise RuntimeError( + "Attempt to write to ZIP archive that was already closed") + + st = os.stat(filename) + isdir = stat.S_ISDIR(st.st_mode) + mtime = time.localtime(st.st_mtime) + date_time = mtime[0:6] + # Create ZipInfo instance to store file information + if arcname is None: + arcname = filename + arcname = os.path.normpath(os.path.splitdrive(arcname)[1]) + while arcname[0] in (os.sep, os.altsep): + arcname = arcname[1:] + if isdir: + arcname += '/' + zinfo = ZipInfo(arcname, date_time) + zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes + if compress_type is None: + zinfo.compress_type = self.compression + else: + zinfo.compress_type = compress_type + + zinfo.file_size = st.st_size + zinfo.flag_bits = 0x00 + zinfo.header_offset = self.fp.tell() # Start of header bytes + + self._writecheck(zinfo) + self._didModify = True + + if isdir: + 
zinfo.file_size = 0
+            zinfo.compress_size = 0
+            zinfo.CRC = 0
+            self.filelist.append(zinfo)
+            self.NameToInfo[zinfo.filename] = zinfo
+            self.fp.write(zinfo.FileHeader())
+            return
+
+        fp = open(filename, "rb")
+        # Must overwrite CRC and sizes with correct data later
+        zinfo.CRC = CRC = 0
+        zinfo.compress_size = compress_size = 0
+        zinfo.file_size = file_size = 0
+        self.fp.write(zinfo.FileHeader())
+        if zinfo.compress_type == ZIP_DEFLATED:
+            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
+                 zlib.DEFLATED, -15)
+        else:
+            cmpr = None
+        while 1:
+            buf = fp.read(1024 * 8)
+            if not buf:
+                break
+            file_size = file_size + len(buf)
+            CRC = crc32(buf, CRC) & 0xffffffff
+            if cmpr:
+                buf = cmpr.compress(buf)
+                compress_size = compress_size + len(buf)
+            self.fp.write(buf)
+        fp.close()
+        if cmpr:
+            buf = cmpr.flush()
+            compress_size = compress_size + len(buf)
+            self.fp.write(buf)
+            zinfo.compress_size = compress_size
+        else:
+            zinfo.compress_size = file_size
+        zinfo.CRC = CRC
+        zinfo.file_size = file_size
+        # Seek backwards and write CRC and file sizes
+        position = self.fp.tell()       # Preserve current position in file
+        self.fp.seek(zinfo.header_offset + 14, 0)
+        self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
+              zinfo.file_size))
+        self.fp.seek(position, 0)
+        self.filelist.append(zinfo)
+        self.NameToInfo[zinfo.filename] = zinfo
+
+    def writestr(self, zinfo_or_arcname, bytes):
+        """Write a file into the archive.  The contents is the string
+        'bytes'.  'zinfo_or_arcname' is either a ZipInfo instance or
+        the name of the file in the archive."""
+        if not isinstance(zinfo_or_arcname, ZipInfo):
+            zinfo = ZipInfo(filename=zinfo_or_arcname,
+                            date_time=time.localtime(time.time())[:6])
+            zinfo.compress_type = self.compression
+            zinfo.external_attr = 0600 << 16
+        else:
+            zinfo = zinfo_or_arcname
+
+        if not self.fp:
+            raise RuntimeError(
+                  "Attempt to write to ZIP archive that was already closed")
+
+        zinfo.file_size = len(bytes)            # Uncompressed size
+        zinfo.header_offset = self.fp.tell()    # Start of header bytes
+        self._writecheck(zinfo)
+        self._didModify = True
+        zinfo.CRC = crc32(bytes) & 0xffffffff   # CRC-32 checksum
+        if zinfo.compress_type == ZIP_DEFLATED:
+            co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
+                 zlib.DEFLATED, -15)
+            bytes = co.compress(bytes) + co.flush()
+            zinfo.compress_size = len(bytes)    # Compressed size
+        else:
+            zinfo.compress_size = zinfo.file_size
+        self.fp.write(zinfo.FileHeader())
+        self.fp.write(bytes)
+        self.fp.flush()
+        if zinfo.flag_bits & 0x08:
+            # Write CRC and file sizes after the file data
+            self.fp.write(struct.pack("<lLL", zinfo.CRC, zinfo.compress_size,
+                  zinfo.file_size))
+        self.filelist.append(zinfo)
+        self.NameToInfo[zinfo.filename] = zinfo
+
+    def __del__(self):
+        """Call the "close()" method in case the user forgot."""
+        self.close()
+
+    def close(self):
+        """Close the file, and for mode "w" and "a" write the ending
+        records."""
+        if self.fp is None:
+            return
+
+        if self.mode in ("w", "a") and self._didModify: # write ending records
+            count = 0
+            pos1 = self.fp.tell()
+            for zinfo in self.filelist:         # write central directory
+                count = count + 1
+                dt = zinfo.date_time
+                dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
+                dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
+                extra = []
+                if zinfo.file_size > ZIP64_LIMIT \
+                        or zinfo.compress_size > ZIP64_LIMIT:
+                    extra.append(zinfo.file_size)
+                    extra.append(zinfo.compress_size)
+                    file_size = 0xffffffff
+                    compress_size = 0xffffffff
+                else:
+                    file_size = zinfo.file_size
+                    compress_size = zinfo.compress_size
+
+                if zinfo.header_offset > ZIP64_LIMIT:
+                    extra.append(zinfo.header_offset)
+                    header_offset = 0xffffffffL
+                else:
+                    header_offset = zinfo.header_offset
+
+                extra_data = zinfo.extra
+                if extra:
+                    # Append a ZIP64 field to the extra's
+                    extra_data = struct.pack(
+                            '<hh' + 'q'*len(extra),
+                            1, 8*len(extra), *extra) + extra_data
+
+                    extract_version = max(45, zinfo.extract_version)
+                    create_version = max(45, zinfo.create_version)
+                else:
+                    extract_version = zinfo.extract_version
+                    create_version = zinfo.create_version
+
+                try:
+                    filename, flag_bits = zinfo._encodeFilenameFlags()
+                    centdir = struct.pack(structCentralDir,
+                        stringCentralDir, create_version,
+                        zinfo.create_system, extract_version, zinfo.reserved,
+                        flag_bits, zinfo.compress_type, dostime, dosdate,
+                        zinfo.CRC, compress_size, file_size,
+                        len(filename), len(extra_data), len(zinfo.comment),
+                        0, zinfo.internal_attr, zinfo.external_attr,
+                        header_offset)
+                except DeprecationWarning:
+                    print >>sys.stderr, (structCentralDir,
+                        stringCentralDir, create_version,
+                        zinfo.create_system, extract_version, zinfo.reserved,
+                        zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
+                        zinfo.CRC, compress_size, file_size,
+                        len(zinfo.filename), len(extra_data), len(zinfo.comment),
+                        0, zinfo.internal_attr, zinfo.external_attr,
+                        header_offset)
+                    raise
+                self.fp.write(centdir)
+                self.fp.write(filename)
+                self.fp.write(extra_data)
+                self.fp.write(zinfo.comment)
+
+            pos2 = self.fp.tell()
+            # Write end-of-zip-archive record
+            centDirCount = count
+            centDirSize = pos2 - pos1
+            centDirOffset = pos1
+            if (centDirCount >= ZIP_FILECOUNT_LIMIT or
+                centDirOffset > ZIP64_LIMIT or
+                centDirSize > ZIP64_LIMIT):
+                # Need to write the ZIP64 end-of-archive records
+                zip64endrec = struct.pack(
+                        structEndArchive64, stringEndArchive64,
+                        44, 45, 45, 0, 0, centDirCount, centDirCount,
+                        centDirSize, centDirOffset)
+                self.fp.write(zip64endrec)
+
+                zip64locrec = struct.pack(
+                        structEndArchive64Locator,
+                        stringEndArchive64Locator, 0, pos2, 1)
+                self.fp.write(zip64locrec)
+                centDirCount = min(centDirCount, 0xFFFF)
+                centDirSize = min(centDirSize, 0xFFFFFFFF)
+                centDirOffset = min(centDirOffset, 0xFFFFFFFF)
+
+            # check for valid comment length
+            if len(self.comment) >= ZIP_MAX_COMMENT:
+                if self.debug > 0:
+                    msg = 'Archive comment is too long; truncating to %d bytes' \
+                          % ZIP_MAX_COMMENT
+                self.comment = self.comment[:ZIP_MAX_COMMENT]
+
+            endrec = struct.pack(structEndArchive, stringEndArchive,
+                                 0, 0, centDirCount, centDirCount,
+                                 centDirSize, centDirOffset, len(self.comment))
+            self.fp.write(endrec)
+            self.fp.write(self.comment)
+            self.fp.flush()
+
+        if not self._filePassed:
+            self.fp.close()
+        self.fp = None
+
+
+class PyZipFile(ZipFile):
+    """Class to create ZIP archives with Python library files and packages."""
+
+    def 
writepy(self, pathname, basename = ""): + """Add all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyo or module.pyc. + This method will compile the module.py into module.pyc if + necessary. + """ + dir, name = os.path.split(pathname) + if os.path.isdir(pathname): + initname = os.path.join(pathname, "__init__.py") + if os.path.isfile(initname): + # This is a package directory, add it + if basename: + basename = "%s/%s" % (basename, name) + else: + basename = name + if self.debug: + print "Adding package in", pathname, "as", basename + fname, arcname = self._get_codename(initname[0:-3], basename) + if self.debug: + print "Adding", arcname + self.write(fname, arcname) + dirlist = os.listdir(pathname) + dirlist.remove("__init__.py") + # Add all *.py files and package subdirectories + for filename in dirlist: + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if os.path.isdir(path): + if os.path.isfile(os.path.join(path, "__init__.py")): + # This is a package directory, add it + self.writepy(path, basename) # Recursive call + elif ext == ".py": + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print "Adding", arcname + self.write(fname, arcname) + else: + # This is NOT a package directory, add its files at top level + if self.debug: + print "Adding files from directory", pathname + for filename in os.listdir(pathname): + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if ext == ".py": + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print "Adding", arcname + self.write(fname, arcname) + else: + if pathname[-3:] != ".py": + raise RuntimeError, \ + 'Files added with writepy() must end with ".py"' + fname, arcname = self._get_codename(pathname[0:-3], basename) + if self.debug: + print "Adding file", arcname + self.write(fname, arcname) + + def _get_codename(self, pathname, basename): + """Return (filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + """ + file_py = pathname + ".py" + file_pyc = pathname + ".pyc" + file_pyo = pathname + ".pyo" + if os.path.isfile(file_pyo) and \ + os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime: + fname = file_pyo # Use .pyo file + elif not os.path.isfile(file_pyc) or \ + os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime: + import py_compile + if self.debug: + print "Compiling", file_py + try: + py_compile.compile(file_py, file_pyc, None, True) + except py_compile.PyCompileError,err: + print err.msg + fname = file_pyc + else: + fname = file_pyc + archivename = os.path.split(fname)[1] + if basename: + archivename = "%s/%s" % (basename, archivename) + return (fname, archivename) + + +def main(args = None): + import textwrap + USAGE=textwrap.dedent("""\ + Usage: + zipfile.py -l zipfile.zip # Show listing of a zipfile + zipfile.py -t zipfile.zip # Test if a zipfile is valid + zipfile.py -e zipfile.zip target # Extract zipfile into target dir + zipfile.py -c zipfile.zip src ... 
# Create zipfile from sources
+    """)
+    if args is None:
+        args = sys.argv[1:]
+
+    if not args or args[0] not in ('-l', '-c', '-e', '-t'):
+        print USAGE
+        sys.exit(1)
+
+    if args[0] == '-l':
+        if len(args) != 2:
+            print USAGE
+            sys.exit(1)
+        zf = ZipFile(args[1], 'r')
+        zf.printdir()
+        zf.close()
+
+    elif args[0] == '-t':
+        if len(args) != 2:
+            print USAGE
+            sys.exit(1)
+        zf = ZipFile(args[1], 'r')
+        zf.testzip()
+        print "Done testing"
+
+    elif args[0] == '-e':
+        if len(args) != 3:
+            print USAGE
+            sys.exit(1)
+
+        zf = ZipFile(args[1], 'r')
+        out = args[2]
+        for path in zf.namelist():
+            if path.startswith('./'):
+                tgt = os.path.join(out, path[2:])
+            else:
+                tgt = os.path.join(out, path)
+
+            tgtdir = os.path.dirname(tgt)
+            if not os.path.exists(tgtdir):
+                os.makedirs(tgtdir)
+            fp = open(tgt, 'wb')
+            fp.write(zf.read(path))
+            fp.close()
+        zf.close()
+
+    elif args[0] == '-c':
+        if len(args) < 3:
+            print USAGE
+            sys.exit(1)
+
+        def addToZip(zf, path, zippath):
+            if os.path.isfile(path):
+                zf.write(path, zippath, ZIP_DEFLATED)
+            elif os.path.isdir(path):
+                for nm in os.listdir(path):
+                    addToZip(zf,
+                            os.path.join(path, nm), os.path.join(zippath, nm))
+            # else: ignore
+
+        zf = ZipFile(args[1], 'w', allowZip64=True)
+        for src in args[2:]:
+            addToZip(zf, src, os.path.basename(src))
+
+        zf.close()
+
+if __name__ == "__main__":
+    main()
diff --git a/image_filer/views.py b/image_filer/views.py
new file mode 100644
index 0000000..b9d8833
--- /dev/null
+++ b/image_filer/views.py
@@ -0,0 +1,215 @@
+import os
+from django.shortcuts import render_to_response
+from django.template import RequestContext
+from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden, HttpResponseBadRequest
+from django.contrib.sessions.models import Session
+from django.conf import settings
+
+from models import Folder, FolderRoot, Image, Bucket, BucketItem
+
+from django import forms
+
+class NewFolderForm(forms.ModelForm):
+    class Meta:
+        model = Folder
+        fields = ('name', )
+
+def _userperms(item, request):
+    r = []
+    ps = ['read', 'edit', 'add_children']
+    for p in ps:
+        attr = "has_%s_permission" % p
+        if hasattr(item, attr):
+            x = getattr(item, attr)(request)
+            if x:
+                r.append( p )
+    return r
+
+def directory_listing(request, folder_id=None):
+    #print request.session.session_key, request.user, type(request.session)
+    #print "%s.SessionStore" % settings.SESSION_ENGINE
+    new_folder_form = NewFolderForm()
+    #print request.user
+
+    if folder_id is not None:
+        folder = Folder.objects.get(id=folder_id)
+        #print "readX: %s" % getattr(folder, 'has_read_permission')(request)
+        #print "readX2: %s" % folder.has_read_permission(request)
+    else:
+        folder = FolderRoot()
+
+
+    # Debug
+    upload_file_form = UploadFileForm()
+
+    folder_children = []
+    folder_files = []
+    if type(folder) == FolderRoot:
+        for f in folder.children:
+            f.perms = _userperms(f, request)
+            folder_children.append(f)
+    else:
+        for f in folder.children.all():
+            f.perms = _userperms(f, request)
+            if hasattr(f, 'has_read_permission'):
+                if f.has_read_permission(request):
+                    folder_children.append(f)
+            else:
+                folder_children.append(f)
+        for f in folder.files:
+            f.perms = _userperms(f, request)
+            if hasattr(f, 'has_read_permission'):
+                if f.has_read_permission(request):
+                    folder_files.append(f)
+            else:
+                folder_files.append(f)
+    try:
+        permissions = {
+            'has_edit_permission': folder.has_edit_permission(request),
+            'has_read_permission': folder.has_read_permission(request),
+            'has_add_children_permission': folder.has_add_children_permission(request),
+        }
+    except:
+        permissions = {}
+
+    #print folder_files
+    #print folder_children
+    return render_to_response('image_filer/directory_listing.html', {
+            'folder': folder,
+            'folder_children': folder_children,
+            'folder_files': folder_files,
+            'new_folder_form': new_folder_form,
+            'upload_file_form': upload_file_form,
+            'permissions': permissions,
+            'permstest': _userperms(folder, request)
+        }, context_instance=RequestContext(request))
+
+def edit_folder(request, folder_id):
+    # TODO: implement edit_folder view
+    folder = None
+    return render_to_response('image_filer/folder_edit.html', {
+            'folder': folder,
+        }, context_instance=RequestContext(request))
+
+def edit_image(request, folder_id):
+    # TODO: implement edit_image view
+    folder = None
+    return render_to_response('image_filer/image_edit.html', {
+            'folder': folder,
+        }, context_instance=RequestContext(request))
+
+def make_folder(request, folder_id=None):
+    if folder_id:
+        folder = Folder.objects.get(id=folder_id)
+    else:
+        folder = None
+    if request.user.is_superuser:
+        pass
+    elif folder is None:
+        # regular users may not add root folders
+        return HttpResponseForbidden()
+    elif not folder.has_add_children_permission(request):
+        # the user does not have permission to add subfolders here
+        return HttpResponseForbidden()
+
+    if request.method == 'POST':
+        new_folder_form = NewFolderForm(request.POST)
+        if new_folder_form.is_valid():
+            new_folder = new_folder_form.save(commit=False)
+            new_folder.parent = folder
+            new_folder.owner = request.user
+            new_folder.save()
+            return HttpResponseRedirect('')
+    else:
+        new_folder_form = NewFolderForm()
+    return render_to_response('image_filer/include/new_folder_form.html', {
+            'new_folder_form': new_folder_form,
+    }, context_instance=RequestContext(request))
+
+class UploadFileForm(forms.ModelForm):
+    class Meta:
+        model = Image
+        #fields = ('file',)
+
+from image_filer.utils.files import generic_handle_file
+
+def upload(request, folder_id=None):
+    """
+    Receives an upload from the flash uploader and fixes the session
+    because of the missing cookie. Receives only one file at a time,
+    although it may be a zip file, which will be unpacked.
+    """
+
+    # flashcookie-hack (flash does not submit the cookie, so we send the
+    # django session id over a regular POST variable)
+    engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
+    session_key = request.POST.get('cookieVar')
+    request.session = engine.SessionStore(session_key)
+    #print request.session.session_key, request.user
+    if folder_id:
+        folder = Folder.objects.get(id=folder_id)
+    else:
+        folder = None
+
+    # check permissions
+    if request.user.is_superuser:
+        pass
+    elif folder is None:
+        # regular users may not add root folders
+        return HttpResponseForbidden()
+    elif not folder.has_add_children_permission(request):
+        # the user does not have permission to add images to this folder
+        print "bad perms"
+        return HttpResponseForbidden()
+
+    # upload and save the file
+    if request.method != 'POST':
+        return HttpResponse("must be POST")
+    original_filename = request.POST.get('Filename')
+    file = request.FILES.get('Filedata')
+    print request.FILES
+    print original_filename, file
+    bucket, was_bucket_created = Bucket.objects.get_or_create(user=request.user)
+    print bucket
+    files = generic_handle_file(file, original_filename)
+    for ifile, iname in files:
+        iext = os.path.splitext(iname)[1].lower()
+        print "extension: ", iext
+        if iext in ['.jpg', '.jpeg', '.png', '.gif']:
+            imageform = UploadFileForm({'original_filename': iname, 'owner': request.user.pk}, {'file': ifile})
+            if imageform.is_valid():
+                print 'imageform is valid'
+                image = imageform.save(commit=False)
+                image.save()
+                bi = BucketItem(bucket=bucket, file=image)
+                bi.save()
+                print image
+            else:
+                print imageform.errors
+    return HttpResponse("ok")
+
+def move_files_to_folder(request, bucket_id=None):
+    folder = Folder.objects.get( id=request.GET.get('folder_id') )
+    try:
+        ids = request.GET.get('file_ids').split(',')
+    except:
+        ids = None
+    if bucket_id:
+        bucket = Bucket.objects.get(id=bucket_id)
+        files = bucket.files.all()
+    elif ids:
+        files = Image.objects.filter(id__in=ids)
+    else:
+        return HttpResponse('nothing to do')
+    for file in files:
+        file.folder = folder
+        file.save()
+    return HttpResponse('ok')
+
+
+def add_file_to_bucket(request):
+    pass
+
+def export_bucket(request):
+    pass
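
Usage note (not part of the patch): a minimal sketch of how the bundled
zipfile module is driven, using only the API shown above (ZipFile, write,
writestr, namelist, testzip, extractall). All paths are hypothetical
examples.

    # Minimal usage sketch for image_filer.utils.zipfile (Python 2).
    from image_filer.utils.zipfile import ZipFile, ZIP_DEFLATED

    # Write: create an archive and add a file under a chosen arcname.
    zf = ZipFile('/tmp/example.zip', 'w')
    zf.write('/tmp/photo.jpg', 'photos/photo.jpg', ZIP_DEFLATED)
    zf.writestr('notes.txt', 'packed by image_filer')
    zf.close()

    # Read: list members, verify CRCs, extract everything.
    zf = ZipFile('/tmp/example.zip', 'r')
    print zf.namelist()     # ['photos/photo.jpg', 'notes.txt']
    print zf.testzip()      # None when all CRCs check out
    zf.extractall('/tmp/unpacked')
    zf.close()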
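The upload() view expects the flash uploader to POST the session key in
cookieVar (to replace the missing session cookie), the original name in
Filename, and the file in Filedata. A sketch of an equivalent request with
Django's test client; the URL '/image_filer/upload/' and the credentials
are hypothetical, the real pattern lives in urls.py.

    # Hedged sketch: exercising upload() with Django's test client.
    from django.test.client import Client

    c = Client()
    c.login(username='admin', password='secret')    # hypothetical credentials
    session_key = c.cookies['sessionid'].value
    f = open('/tmp/photo.jpg', 'rb')                # hypothetical test image
    response = c.post('/image_filer/upload/', {
        'cookieVar': session_key,   # stands in for the missing cookie
        'Filename': 'photo.jpg',
        'Filedata': f,
    })
    f.close()
    print response.content          # "ok" on success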
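Similarly, move_files_to_folder() is driven entirely by GET parameters: a
bucket_id in the URL, or a comma-separated file_ids list, selects the images
to move into folder_id. Continuing the sketch above (URL again hypothetical):

    # Hedged sketch: moving images 3, 4 and 5 into folder 1.
    response = c.get('/image_filer/move/', {
        'folder_id': 1,
        'file_ids': '3,4,5',
    })
    print response.content          # 'ok', or 'nothing to do'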