File: flickr.py

package info (click to toggle)
nodebox-web 1.9.2-2
  • links: PTS
  • area: main
  • in suites: lenny
  • size: 1,724 kB
  • ctags: 1,254
  • sloc: python: 6,161; sh: 602; xml: 239; makefile: 33
file content (160 lines) | stat: -rw-r--r-- 5,107 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
from urllib import quote_plus
from url import URLAccumulator
from xml.dom.minidom import parseString
import os
from cache import Cache

# Shared key sent with every request to the Flickr REST API.
API_KEY = "787081027f43b0412ba41142d4540480"

def clear_cache():
    
    """ Empties the local Flickr cache (query XML and downloaded images). """
    
    flickr_cache = Cache("flickr")
    flickr_cache.clear()

### FLICKR IMAGE #####################################################################################

class FlickrImage(URLAccumulator):
    
    """ A single photo found by a FlickrSearch query.
    
    Starts out as a metadata stub (id, name, author) filled in by
    FlickrSearch._parse_image(). The actual image file is only fetched
    when download() is called; once the download completes, the path
    attribute points to the image file in the local cache.
    
    """
    
    def __init__(self):
        
        # Metadata parsed from the search results XML.
        self.id        = 0
        self.name      = ""
        self.author    = ""
        self.url       = ""
        self.path      = ""

        # These are just here for consistency
        # with MorgueFileImage objects.
        self.category  = ""
        self.date      = ""
        self.views     = 0
        self.downloads = 0
        
        # Second-stage URLAccumulator that fetches the image file itself
        # (created in load() once the file's URL is known).
        self._download = None
        
    def __str__(self):
        
        return self.name
        
    def __cmp__(self, other):
        
        # NOTE(review): always reports "greater than", regardless of other.
        # Presumably deliberate (sorting a result list leaves it untouched
        # in a stable sort), but worth confirming against MorgueFileImage.
        return 1
        
    def download(self, thumbnail=False, wait=60, asynchronous=False):
        
        """ Downloads this image to cache.
        
        Calling the download() method instantiates an asynchronous URLAccumulator
        that will fetch the image's URL from Flickr (flickr.photos.getSizes).
        A second process then downloads the file at the retrieved URL.
        
        Once it is done downloading, this image will have its path property
        set to an image file in the cache. When asynchronous is False the
        path is also returned directly.
        
        Expects self.id to have been set (as a string) by FlickrSearch.
        
        """
        
        # Stash the options for load(), which runs once the XML arrives.
        self._thumbnail = thumbnail
        self._wait = wait
        self._asynchronous = asynchronous

        url  = "http://api.flickr.com/services/rest/?method=flickr.photos.getSizes"
        url += "&photo_id=" + self.id
        url += "&api_key=" + API_KEY
        # Re-initialising the URLAccumulator base starts the first request.
        URLAccumulator.__init__(self, url, wait, asynchronous, "flickr", ".xml", 2)

        if not asynchronous:
            return self.path
        
    def load(self, data):
        
        """ Parses the getSizes XML and kicks off the actual image download. """
        
        # Step one: fetch the image location from the Flickr API.
        # (load() is only relevant for the API response; the guard skips
        # the second-stage call where self.url is the image file's URL.)
        if self.url.startswith("http://api.flickr.com"):
            dom = parseString(data)
            for e in dom.getElementsByTagName("size"):
                self.url = e.getAttribute("source")
                label = e.getAttribute("label")
                # We pick a thumbnail when asked, 
                # or otherwise preferably the original image.
                # If neither label matches, self.url ends up as the last
                # <size> listed — presumably the largest; confirm with the API.
                if label == "Thumbnail" and self._thumbnail: break
                if label == "Original": break
            
            # Step two: we know where the image is located,
            # now start downloading it.
            # The [-4:] slice assumes a three-letter extension like ".jpg".
            extension = os.path.basename(self.url)[-4:]
            self._download = URLAccumulator(self.url, self._wait, self._asynchronous, "flickr", extension, 2)
            
    def _done(self):
        
        # Done only when both the API query and the file download finished.
        done = URLAccumulator._done(self)
        if self._download:
            if self._download.done: 
                # Step three: set the path to the cached image.
                self.path = self._download._cache.hash(self._download.url)
            return done and self._download.done
        else:
            return done

    done = property(_done)

### FLICKR SEARCH ####################################################################################

# Sort orders accepted by the Flickr search API (sort parameter).
SORT_INTERESTINGNESS = "interestingness-desc"
SORT_RELEVANCE = "relevance"
SORT_DATE = "date-posted-desc"

class FlickrSearch(list, URLAccumulator):
    
    """ A list of FlickrImage stubs matching a tag query.
    
    Issues one flickr.photos.search request (or flickr.photos.getRecent
    when q is "recent") and fills itself with a FlickrImage per <photo>
    element in the response.
    
    """
    
    def __init__(self, q, start=1, count=100, wait=10, asynchronous=False, cached=True, 
                 sort=SORT_INTERESTINGNESS):
        
        # A cache id of None disables disk caching in URLAccumulator.
        cache = None
        if cached:
            cache = "flickr"

        # The special query "recent" switches endpoints; the tags
        # parameter is still appended (the getRecent call ignores it).
        if q == "recent":
            method = "flickr.photos.getRecent"
        else:
            method = "flickr.photos.search"
        url  = "http://api.flickr.com/services/rest/?method=" + method
        url += "&tags=" + quote_plus(q)
        url += "&page=" + str(start)
        url += "&per_page=" + str(count)
        url += "&sort=" + sort
        url += "&api_key=" + API_KEY
        
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml", 1)

    def load(self, data):
        
        """ Appends a FlickrImage for each <photo> in the response XML. """
        
        if data == "":
            return
        photos = parseString(data).getElementsByTagName("photo")
        for photo in photos:
            self.append(self._parse_image(photo))
            
    def _parse_image(self, xml):
        
        """ Builds a FlickrImage stub from a <photo> element's attributes. """
        
        img = FlickrImage()
        for field, attribute in (("id", "id"), ("name", "title"), ("author", "owner")):
            setattr(img, field, xml.getAttribute(attribute))
        return img

######################################################################################################
 
def recent(start=1, count=100, wait=10, asynchronous=False, cached=True):
    
    """ Returns a FlickrSearch of the most recently posted photos. """
    
    return FlickrSearch("recent", start=start, count=count, wait=wait,
                        asynchronous=asynchronous, cached=cached)

def search(q, start=1, count=100, wait=10, asynchronous=False, cached=True, 
           sort=SORT_INTERESTINGNESS):
    
    """ Returns a FlickrSearch of photos tagged with q. """
    
    return FlickrSearch(q, start=start, count=count, wait=wait,
                        asynchronous=asynchronous, cached=cached, sort=sort)

#images = search("glacier")
#img = images[0]
#img.download(asynchronous=True)
#while not img.done:
#    sleep(0.1)
#    print "zzz..."
#image(img.path, 0, 0)