### CREDITS ##########################################################################################
# Copyright (c) 2007 Tom De Smedt.
# See LICENSE.txt for details.
__author__ = "Tom De Smedt"
__version__ = "1.9.2.2"
__copyright__ = "Copyright (c) 2007 Tom De Smedt"
__license__ = "GPL"
### NODEBOX WEB LIBRARY #############################################################################
# The NodeBox Web library offers a collection of services to retrieve content from the internet.
# You can use the library to query Yahoo! for links, images, news and spelling suggestions,
# to read RSS and Atom newsfeeds, to retrieve articles from Wikipedia, to collect quality images
# from morgueFile, to get color themes from kuler, to browse through HTML documents, to clean up HTML,
# to validate URLs, to create GIF images from math equations using mimeTeX, and to get ironic word
# definitions from Urban Dictionary.
# The NodeBox Web library works with a caching mechanism that stores things you download from the web,
# so they can be retrieved faster the next time. Many of the services also work asynchronously.
# This means you can use the library in an animation that keeps on running while new content is downloaded
# in the background.
# The library bundles Leonard Richardson's Beautiful Soup to parse HTML,
# Mark Pilgrim's Universal Feed Parser for newsfeeds, a connection to John Forkosh's mimeTeX server,
# and Leif K-Brooks' entity replacement algorithm.
######################################################################################################
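# A minimal usage sketch of the asynchronous behavior described above (hedged:
# it assumes yahoo.search() accepts an asynchronous parameter and exposes a
# done flag while results download in the background):
#
#   from web import yahoo
#   search = yahoo.search("nodebox", asynchronous=True)
#   while not search.done:
#       pass  # an animation could keep running here
#   for result in search:
#       print result.url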
import cache
import url
import html
import page
import yahoo
import newsfeed
import wikipedia
import morguefile
import flickr
import kuler
import colr
import mimetex
import urbandictionary
def is_url(url_, wait=10):
    """Returns True when url_ points to a reachable URL, waiting at most wait seconds."""
    return url.is_url(url_, wait)
def download(url_, wait=60, cache=None, type=".html"):
    """Downloads the data at url_, optionally storing it in the given cache under the given file type."""
    return url.retrieve(url_, wait, False, cache, type).data
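# A hedged sketch of the two helpers above (the URL is a placeholder, and the
# cache parameter is assumed to name a cache store on disk):
#
#   if is_url("http://nodebox.net"):
#       data = download("http://nodebox.net", wait=60, type=".html")
#       data = download("http://nodebox.net", cache="pages")  # served from cache next time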
def clear_cache():
    """Empties the cache of each individual service."""
page.clear_cache()
yahoo.clear_cache()
newsfeed.clear_cache()
wikipedia.clear_cache()
morguefile.clear_cache()
flickr.clear_cache()
kuler.clear_cache()
colr.clear_cache()
mimetex.clear_cache()
urbandictionary.clear_cache()
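# For example (a sketch; the URL is a placeholder):
#
#   clear_cache()                          # drop everything stored by the services
#   data = download("http://nodebox.net")  # fetched fresh from the web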