File: standalone_examples.diff

package info (click to toggle)
nodebox-web 1.9.4.6-2
  • links: PTS, VCS
  • area: main
  • in suites: wheezy
  • size: 1,900 kB
  • sloc: python: 7,582; ansic: 581; xml: 239; makefile: 2
file content (198 lines) | stat: -rw-r--r-- 6,806 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
# Description: create standalone examples
# Author: Serafeim Zanikolas <serzan@hellug.gr>
# Last-Update: 2009-02-20
# Forwarded: not-needed
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example1.py blah/standalone-examples/_web_example1.py
--- nodebox-web-1.9.2/standalone-examples/_web_example1.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example1.py	2008-04-05 23:12:08.000000000 +0100
@@ -0,0 +1,20 @@
+# Working with URLs.
+
+from nodebox_web import web
+
+# Is this a valid URL?
+print web.is_url("http://nodebox.net")
+
+# Does the page exist?
+print web.url.not_found("http://nodebox.net/nothing")
+
+# Split the URL into different components.
+url = web.url.parse("http://nodebox.net/code/index.php/Home")
+print "domain:", url.domain
+print "page:", url.page
+
+# Retrieve data from the web.
+url = "http://nodebox.net/code/data/media/header.jpg"
+print web.url.is_image(url)
+img = web.url.retrieve(url)
+print "download errors:", img.error
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example2.py blah/standalone-examples/_web_example2.py
--- nodebox-web-1.9.2/standalone-examples/_web_example2.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example2.py	2008-04-05 23:13:08.000000000 +0100
@@ -0,0 +1,24 @@
+# Parsing web pages.
+
+from nodebox_web import web
+
+url = "http://nodebox.net"
+print web.url.is_webpage(url)
+
+# Retrieve the data from the web page and put it in an easy object.
+html = web.page.parse(url)
+
+# The actual URL you are redirected to.
+# This will be None when the page is retrieved from cache.
+print html.redirect
+
+# Get the web page title.
+print html.title
+
+# Get all the links, including internal links in the same site.
+print html.links(external=False)
+
+# Browse through the HTML tree, find <div id="content">,
+# strip tags from it and print out the contents.
+content = html.find(id="content")
+web.html.plain(content)
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example3.py blah/standalone-examples/_web_example3.py
--- nodebox-web-1.9.2/standalone-examples/_web_example3.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example3.py	2008-04-05 23:14:54.000000000 +0100
@@ -0,0 +1,24 @@
+# Querying Yahoo!
+
+from nodebox_web import web
+from nodebox_web.web import yahoo
+
+# Get a list of links for a search query.
+links = yahoo.search_images("food")
+print links
+
+# Retrieve a random image.
+img = web.url.retrieve(links)
+
+# We can't always trust the validity of data from the web,
+# the site may be down, the image removed, etc.
+# If you're going to do a lot of batch operations and
+# you don't want the script to halt on an error,
+# put your code inside a try/except statement.
+try:
+    data=img.data
+except:
+    print str(img.error)
+    
+# An easier command is web.download():
+img = web.download(links)
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example4.py blah/standalone-examples/_web_example4.py
--- nodebox-web-1.9.2/standalone-examples/_web_example4.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example4.py	2008-04-05 23:16:00.000000000 +0100
@@ -0,0 +1,22 @@
+# Reading newsfeeds.
+
+from nodebox_web import web
+from nodebox_web.web import newsfeed
+
+url = "http://rss.slashdot.org/Slashdot/slashdot"
+
+# Parse the newsfeed data into a handy object.
+feed = newsfeed.parse(url)
+
+# Get the title and the description of the feed.
+print feed.title, "|", feed.description
+
+for item in feed.items:
+    print "-" * 40
+    print "- Title       :", item.title
+    print "- Link        :", item.link
+    print "- Description :", web.html.plain(item.description)
+    print "- Date        :", item.date
+    print "- Author      :", item.author
+
+print item.description
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example5.py blah/standalone-examples/_web_example5.py
--- nodebox-web-1.9.2/standalone-examples/_web_example5.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example5.py	2008-04-05 23:17:15.000000000 +0100
@@ -0,0 +1,25 @@
+# Wikipedia articles.
+
+from nodebox_web import web
+from nodebox_web.web import wikipedia
+
+q = "Finland"
+article = wikipedia.search(q, language="nl")
+
+# Print the article title.
+print article.title
+
+# Get a list of all the links to other articles.
+# We can supply these to a new search.
+print article.links
+
+# The title of each paragraph.
+for p in article.paragraphs: 
+    print p.title
+    #print "-"*40
+    #print p
+
+print article.paragraphs[0]
+
+print
+print article.references[0]
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example6.py blah/standalone-examples/_web_example6.py
--- nodebox-web-1.9.2/standalone-examples/_web_example6.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example6.py	2008-04-05 23:28:50.000000000 +0100
@@ -0,0 +1,16 @@
+# Retrieve images from MorgueFile.
+
+from nodebox_web import web
+from nodebox_web.web import morguefile
+
+q = "cloud"
+img = morguefile.search(q)[0]
+
+print img
+
+# A MorgueFile image in the list has
+# a number of methods and properties.
+# The download() method caches the image locally 
+# and returns the path to the file.
+img = img.download()
+
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example7.py blah/standalone-examples/_web_example7.py
--- nodebox-web-1.9.2/standalone-examples/_web_example7.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example7.py	2008-04-06 13:56:13.000000000 +0100
@@ -0,0 +1,18 @@
+# Color themes from Kuler.
+
+from nodebox_web import web
+from nodebox_web.web import kuler
+
+# Get the current most popular themes.
+themes = kuler.search_by_popularity()
+
+# the code below assumes the availability of methods that are defined in other
+# parts of the nodebox library
+#
+# Display colors from the first theme.
+#for i in range(100):
+#    for r, g, b in themes[0]:
+#        fill(r, g, b, 0.8)
+#        rotate(random(360))
+#        s = random(50) + 10
+#        oval(random(300), random(300), s, s)
diff -Nur nodebox-web-1.9.2/standalone-examples/_web_example9.py blah/standalone-examples/_web_example9.py
--- nodebox-web-1.9.2/standalone-examples/_web_example9.py	1970-01-01 01:00:00.000000000 +0100
+++ blah/standalone-examples/_web_example9.py	2008-04-05 23:38:11.000000000 +0100
@@ -0,0 +1,13 @@
+# Clearing the cache.
+
+from nodebox_web import web
+
+# Queries and images are cached locally for speed,
+# so it's a good idea to empty the cache now and then.
+# Also, when a query fails (internet is down etc.),
+# this "bad" query is also cached.
+# In that case you may want to clear the cache of the specific
+# portion of the library you're working with,
+# for example: morguefile.clear_cache()
+
+web.clear_cache()