File: flickr.py

package info (click to toggle)
nodebox-web 1.9.4.6-1
  • links: PTS, VCS
  • area: main
  • in suites: squeeze
  • size: 1,904 kB
  • ctags: 1,602
  • sloc: python: 7,582; ansic: 581; xml: 239; makefile: 2
file content (202 lines) | stat: -rw-r--r-- 6,507 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
from urllib import quote_plus
from nodebox_web.web.url import URLAccumulator
from xml.dom.minidom import parseString
import os
from nodebox_web.web.cache import Cache

# Shared API key sent with every Flickr REST call.
API_KEY = "787081027f43b0412ba41142d4540480"

def clear_cache():
    """ Empties the local "flickr" download cache. """
    flickr_cache = Cache("flickr")
    flickr_cache.clear()

### FLICKR IMAGE #####################################################################################

# Flickr size labels as returned by the flickr.photos.getSizes call.
# Note the shifted naming: our "small" is Flickr's "Thumbnail",
# our "large" is Flickr's "Medium", our "xlarge" is Flickr's "Original".
SIZE_SQUARE    = "Square"
SIZE_SMALL     = "Thumbnail"
SIZE_MEDIUM    = "Small"
SIZE_LARGE     = "Medium"
SIZE_XLARGE    = "Original"

def disambiguate_size(size):
    
    """ Maps a size alias (or a boolean) to a Flickr size label.
    
    True means "a thumbnail", False means "the full image".
    Strings that don't match any known alias are returned unchanged,
    so callers may also pass an exact Flickr label directly.
    
    """
    
    # Bug fix: this previously returned the undefined name SIZE_THUMBNAIL,
    # raising a NameError; the thumbnail label is SIZE_SMALL ("Thumbnail").
    if size == True  : return SIZE_SMALL
    if size == False : return SIZE_XLARGE
    if size.lower() in ("square", "sq"): 
        return SIZE_SQUARE
    if size.lower() in ("small", "s", "thumbnail", "thumb", "t", "th", "icon"):
        return SIZE_SMALL
    if size.lower() in ("medium", "m"): 
        return SIZE_MEDIUM
    if size.lower() in ("large", "l"): 
        return SIZE_LARGE
    if size.lower() in ("original", "o", "xlarge", "xl", "huge", "wallpaper"): 
        return SIZE_XLARGE
    return size

class FlickrImage(URLAccumulator):
    
    """ A single Flickr photo.
    
    The id, name and author fields are filled in by FlickrSearch.
    Calling download() fetches the image file to the cache and,
    once done, sets the path property to the cached file.
    
    """
    
    def __init__(self):
        
        self.id        = 0
        self.name      = ""
        self.author    = ""
        self.url       = ""
        self.path      = ""

        # These are just here for consistency
        # with MorgueFileImage objects.
        self.category  = ""
        self.date      = ""
        self.views     = 0
        self.downloads = 0
        
        # URLAccumulator for the actual image file (step two of the
        # download); None until load() has resolved the image URL.
        self._download = None
        
    def __str__(self):
        
        return self.name.encode("utf-8")
        
    def __cmp__(self, other):
        
        # Arbitrary: every image compares greater, so sorting is a no-op.
        return 1
        
    def download(self, size=SIZE_XLARGE, thumbnail=False, wait=60, asynchronous=False):
        
        """ Downloads this image to cache.
        
        Calling the download() method instantiates an asynchronous URLAccumulator
        that will fetch the image's URL from Flickr.
        A second process then downloads the file at the retrieved URL.
        
        Once it is done downloading, this image will have its path property
        set to an image file in the cache.
        
        """
        
        # Bug fix: this used the undefined name SIZE_THUMBNAIL
        # (NameError); the thumbnail label is SIZE_SMALL.
        if thumbnail == True: size = SIZE_SMALL # backwards compatibility
        self._size = disambiguate_size(size)
        self._wait = wait
        self._asynchronous = asynchronous

        # Step one: ask the Flickr API for the available sizes of this photo.
        url  = "http://api.flickr.com/services/rest/?method=flickr.photos.getSizes"
        # str() so a numeric id (e.g. the default 0) doesn't raise a TypeError.
        url += "&photo_id=" + str(self.id)
        url += "&api_key=" + API_KEY
        URLAccumulator.__init__(self, url, wait, asynchronous, "flickr", ".xml", 2)

        if not asynchronous:
            return self.path
        
    def load(self, data):
        
        """ Called by URLAccumulator with the downloaded data. """
        
        # Step one: fetch the image location from the Flickr API.
        if self.url.startswith("http://api.flickr.com"):
            dom = parseString(data)
            for e in dom.getElementsByTagName("size"):
                self.url = e.getAttribute("source")
                label = e.getAttribute("label")
                # We pick the requested size.
                # NOTE(review): if the requested label is absent, self.url is
                # left at the last <size> listed — presumably the largest.
                if label == self._size: break
            
            # Step two: we know where the image is located,
            # now start downloading it.
            # The last four characters of the filename are the
            # dot plus extension, e.g. ".jpg".
            extension = os.path.basename(self.url)[-4:]
            self._download = URLAccumulator(self.url, self._wait, self._asynchronous, "flickr", extension, 2)
            
    def _done(self):
        
        """ Both the API lookup and the image download must have finished. """
        
        done = URLAccumulator._done(self)
        if self._download:
            if self._download.done: 
                # Step three: set the path to the cached image.
                self.path = self._download._cache.hash(self._download.url)
            return done and self._download.done
        else:
            return done

    done = property(_done)

### FLICKR SEARCH ####################################################################################

SORT_INTERESTINGNESS = "interestingness-desc"
SORT_RELEVANCE = "relevance"
SORT_DATE = "date-posted-desc"

MATCH_ANY = "any" # any of the supplied keywords
MATCH_ALL = "all" # all of the supplied keywords

def disambiguate_sort(sort):
    """ Maps a shorthand name to a Flickr sort parameter value.
    
    Unrecognized values pass through untouched, so an exact
    Flickr sort string can also be supplied directly.
    """
    key = sort.lower()
    for prefix, order in (("interest", SORT_INTERESTINGNESS),
                          ("relevan",  SORT_RELEVANCE),
                          ("date",     SORT_DATE)):
        if key.startswith(prefix):
            return order
    return sort

class FlickrSearch(list, URLAccumulator):
    
    """ A list of FlickrImage objects matching the query.
    
    A plain string query is sent to flickr.photos.search as free text;
    a list or tuple of keywords is sent as tags (joined by match mode).
    The special query "recent" calls flickr.photos.getRecent instead.
    
    """
    
    def __init__(self, q, start=1, count=100, wait=10, asynchronous=False, cached=True, 
                 sort=SORT_RELEVANCE, match=MATCH_ANY):

        # Best effort: lists/tuples (no encode attribute) and
        # already-encoded strings simply pass through unchanged.
        try: q = q.encode("utf-8")
        except Exception:
            pass

        if cached: 
            cache = "flickr"
        else:
            cache = None
        
        url  = "http://api.flickr.com/services/rest/?method="
        if q == "recent":
            url += "flickr.photos.getRecent"
        else:
            url += "flickr.photos.search"
        if isinstance(q, (list, tuple)):
            # Bug fix: each tag was quote_plus'd and then the joined string
            # was quote_plus'd AGAIN, double-encoding spaces and specials
            # (e.g. " " -> "+" -> "%2B"). Encode each tag exactly once;
            # Flickr expects a comma-separated tag list.
            url += "&tags=" + ",".join(quote_plus(tag) for tag in q)
            url += "&tag_mode=" + match
        else:
            url += "&text=" + quote_plus(q)
        url += "&page=" + str(start)
        url += "&per_page=" + str(count)
        url += "&sort=" + disambiguate_sort(sort)
        url += "&api_key=" + API_KEY
        
        URLAccumulator.__init__(self, url, wait, asynchronous, cache, ".xml", 1)

    def load(self, data):
        
        """ Called by URLAccumulator; parses each <photo> into a FlickrImage. """
        
        if data == "": return
        dom = parseString(data)
        for img in dom.getElementsByTagName("photo"):
            self.append(self._parse_image(img))
            
    def _parse_image(self, xml):
        
        """ Builds a FlickrImage from one <photo> element's attributes. """
        
        fi = FlickrImage()
        fi.id     = xml.getAttribute("id")
        fi.name   = xml.getAttribute("title")
        fi.author = xml.getAttribute("owner")
        
        return fi

######################################################################################################
 
def recent(start=1, count=100, wait=10, asynchronous=False, cached=True):
    
    """ Returns a FlickrSearch of the most recently posted photos. """
    
    return FlickrSearch("recent", start=start, count=count, wait=wait,
                        asynchronous=asynchronous, cached=cached)

def search(q, start=1, count=100, wait=10, asynchronous=False, cached=True, 
           sort=SORT_RELEVANCE, match=MATCH_ANY):
    
    """ Returns a FlickrSearch for the given query (string, or list of tags). """
    
    return FlickrSearch(q, start=start, count=count, wait=wait,
                        asynchronous=asynchronous, cached=cached,
                        sort=sort, match=match)

#images = search("glacier")
#img = images[0]
#img.download(asynchronous=True)
#while not img.done:
#    sleep(0.1)
#    print "zzz..."
#image(img.path, 0, 0)