File: adjust_imports.diff

# Description: adjust module imports to use absolute nodebox_web.web paths
#  and the system feedparser and SOAPpy modules, and drop the simplejson-based
#  json compatibility wrapper.
# Author: Serafeim Zanikolas <serzan@hellug.gr>
# Last-Update: 2009-02-20
# Forwarded: not-needed
Index: nodebox-web/colr.py
===================================================================
--- nodebox-web.orig/colr.py	2009-02-20 23:54:55.000000000 +0000
+++ nodebox-web/colr.py	2009-02-20 23:54:59.000000000 +0000
@@ -1,6 +1,6 @@
-from url import URLAccumulator
+from nodebox_web.web.url import URLAccumulator
 from urllib import quote
-from cache import Cache
+from nodebox_web.web.cache import Cache
 import simplejson
 
 def clear_cache():
@@ -65,7 +65,7 @@
 
     def draw(self, x, y, w=40, h=40):
         
-        try: from web import _ctx
+        try: from nodebox_web.web.web import _ctx
         except: pass
         
         from nodebox.graphics import RGB
@@ -183,4 +183,4 @@
 #size(500, 650)
 #themes = search("office")
 #theme = themes[0]
-#preview(theme)
\ No newline at end of file
+#preview(theme)
Index: nodebox-web/flickr.py
===================================================================
--- nodebox-web.orig/flickr.py	2009-02-20 23:54:55.000000000 +0000
+++ nodebox-web/flickr.py	2009-02-20 23:54:59.000000000 +0000
@@ -1,8 +1,8 @@
 from urllib import quote_plus
-from url import URLAccumulator
+from nodebox_web.web.url import URLAccumulator
 from xml.dom.minidom import parseString
 import os
-from cache import Cache
+from nodebox_web.web.cache import Cache
 
 API_KEY = "787081027f43b0412ba41142d4540480"
 
Index: nodebox-web/mimetex.py
===================================================================
--- nodebox-web.orig/mimetex.py	2009-02-20 23:54:55.000000000 +0000
+++ nodebox-web/mimetex.py	2009-02-20 23:54:59.000000000 +0000
@@ -5,9 +5,9 @@
 # Copyright (c) 2007 by Tom De Smedt.
 # See LICENSE.txt for details.
 
-from url import URLAccumulator
+from nodebox_web.web.url import URLAccumulator
 from urllib import quote
-from cache import Cache
+from nodebox_web.web.cache import Cache
 
 def clear_cache():
     Cache("mimetex").clear()
@@ -33,4 +33,4 @@
     return mimeTeX(eq).image
 
 #eq = "E = hf = \frac{hc}{\lambda} \,\! "
-#image(gif(eq), 10, 10)
\ No newline at end of file
+#image(gif(eq), 10, 10)
Index: nodebox-web/morguefile.py
===================================================================
--- nodebox-web.orig/morguefile.py	2009-02-20 23:54:55.000000000 +0000
+++ nodebox-web/morguefile.py	2009-02-20 23:54:59.000000000 +0000
@@ -9,8 +9,8 @@
 from urllib import quote_plus
 from xml.dom.minidom import parseString
 
-from url import URLAccumulator
-from cache import Cache
+from nodebox_web.web.url import URLAccumulator
+from nodebox_web.web.cache import Cache
 
 def clear_cache():
     Cache("morguefile").clear()
@@ -159,4 +159,4 @@
 
 #img = images[0]
 #img.download()
-#image(img.path, 0, 0)
\ No newline at end of file
+#image(img.path, 0, 0)
Index: nodebox-web/__init__.py
===================================================================
--- nodebox-web.orig/__init__.py	2009-02-20 23:54:55.000000000 +0000
+++ nodebox-web/__init__.py	2009-02-20 23:54:59.000000000 +0000
@@ -34,7 +34,6 @@
 import html
 import page
 import simplejson
-import json # wrapper for simplejson, backward compatibility.
 
 packages = [
     "yahoo", "google", 
@@ -97,4 +96,4 @@
 # url.parse() has a new .filename attribute (equals .page).
 # Handy web.save() command downloads data and saves it to a given path.
 # hex_to_rgb() improvement for hex strings shorter than 6 characters.
-# Upgraded to BeautifulSoup 3.0.7a
\ No newline at end of file
+# Upgraded to BeautifulSoup 3.0.7a
Index: nodebox-web/newsfeed.py
===================================================================
--- nodebox-web.orig/newsfeed.py	2009-02-20 23:54:55.000000000 +0000
+++ nodebox-web/newsfeed.py	2009-02-20 23:55:13.000000000 +0000
@@ -8,7 +8,7 @@
 
 import os
 
-from feedparser import feedparser
+import feedparser
 
 from url import URLAccumulator
 from html import strip_tags
@@ -159,4 +148,4 @@
     print "Author:", item.author
     print ">>", item.author_detail.name
     print ">>", item.author_detail.email
-"""
\ No newline at end of file
+"""
Index: nodebox-web/urbandictionary.py
===================================================================
--- nodebox-web.orig/urbandictionary.py	2009-02-20 23:54:55.000000000 +0000
+++ nodebox-web/urbandictionary.py	2009-02-20 23:54:59.000000000 +0000
@@ -1,5 +1,5 @@
 import url
-import soap
+import SOAPpy as soap
 import re
 from cache import Cache
 import pickle
@@ -74,4 +74,4 @@
                 self.append(ubd)
             
 def search(q, cached=True):
-    return UrbanDictionary(q, cached)
\ No newline at end of file
+    return UrbanDictionary(q, cached)