File: webforms_aggregator_tests.py

package info (click to toggle)
chromium 145.0.7632.159-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 5,976,224 kB
  • sloc: cpp: 36,198,469; ansic: 7,634,080; javascript: 3,564,060; python: 1,649,622; xml: 838,470; asm: 717,087; pascal: 185,708; sh: 88,786; perl: 88,718; objc: 79,984; sql: 59,811; cs: 42,452; fortran: 24,101; makefile: 21,144; tcl: 15,277; php: 14,022; yacc: 9,066; ruby: 7,553; awk: 3,720; lisp: 3,233; lex: 1,328; ada: 727; jsp: 228; sed: 36
file content (56 lines) | stat: -rwxr-xr-x 1,944 bytes parent folder | download | duplicates (7)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
#!/usr/bin/env python
# Copyright 2011 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import tempfile
import unittest

import webforms_aggregator


class WebformsAggregatorTest(unittest.TestCase):
  """Unit tests for the webforms_aggregator module."""

  def setUp(self):
    self.cookie_file = 'test.cookie'
    self.url1 = 'http://www.google.com'
    self.url2 = 'http://www.macys.com'
    self.domain = 'google.com'
    self.url_file = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
    self.url_file.file.write(
        'URLs to crawl:\n%s\n%s\n' % (self.url1, self.url2))
    self.url_file.close()

  def tearDown(self):
    if os.path.isfile(self.cookie_file):
      os.unlink(self.cookie_file)
    if os.path.isfile(self.url_file.name):
      self.url_file.close()
      os.unlink(self.url_file.name)

  def testRetrieverDownloadsPage(self):
    """Verify the retriever can download a page."""
    r = webforms_aggregator.Retriever(self.url1, self.domain, self.cookie_file)
    self.assertTrue(r.Download(),
                    msg='Retriever could not download "%s"' % self.url1)

  def testCrawlerFindsRegPageFromUrl(self):
    """Verify that the crawler is able to find a reg page from the given URL."""
    c = webforms_aggregator.Crawler(self.url2)
    self.assertTrue(
        c.Run(), msg='Crawler could not find the reg page of "%s"' % self.url2)

  def testThreadedCrawlerFindsRegPageFromUrlsFile(self):
    """Verify the threaded crawler finds reg page from a file of URLs."""
    c = webforms_aggregator.ThreadedCrawler(self.url_file.name)
    self.assertNotEqual(
        c.Run(), -1,
        msg='Threaded crawler could not find the reg page from the URLs file')


if __name__ == '__main__':
  # Build and run the suite explicitly so verbosity can be set.
  loader = unittest.TestLoader()
  test_suite = loader.loadTestsFromTestCase(WebformsAggregatorTest)
  runner = unittest.TextTestRunner(verbosity=2)
  runner.run(test_suite)