// These are specialized integration tests. We only build them on demand, since the
// external docker environment they require takes a lot of additional work to keep
// working.
// +build integration

package main

import (
	"database/sql"
	"fmt"
	"os"
	"testing"

	_ "github.com/lib/pq"
	"github.com/prometheus/client_golang/prometheus"
	. "gopkg.in/check.v1"
)

// Hook up gocheck into the "go test" runner.
func Test(t *testing.T) { TestingT(t) }

type IntegrationSuite struct {
	e *Exporter
}

var _ = Suite(&IntegrationSuite{})
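
// SetUpSuite runs once before the suite's tests and requires DATA_SOURCE_NAME
// to point at the docker-based test database.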
func (s *IntegrationSuite) SetUpSuite(c *C) {
	dsn := os.Getenv("DATA_SOURCE_NAME")
	c.Assert(dsn, Not(Equals), "")

	exporter := NewExporter(dsn, false, "")
	c.Assert(exporter, NotNil)

	// Assign the exporter to the suite
	s.e = exporter
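
	// MustRegister also exercises the exporter's Describe implementation and
	// panics if the collector cannot be registered.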
	prometheus.MustRegister(exporter)
}

// TODO: it would be nice if this didn't mostly just recreate the scrape function
func (s *IntegrationSuite) TestAllNamespacesReturnResults(c *C) {
	// Setup a dummy channel to consume metrics
	ch := make(chan prometheus.Metric, 100)
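	// Drain the channel in the background so metric sends never block the scrape.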
	go func() {
		for range ch {
		}
	}()

	// Open a database connection
	db, err := sql.Open("postgres", s.e.dsn)
	c.Assert(db, NotNil)
	c.Assert(err, IsNil)
	defer db.Close()

	// Do a version update
	err = s.e.checkMapVersions(ch, db)
	c.Assert(err, IsNil)
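
	// checkMapVersions should have refreshed s.e.metricMap and s.e.queryOverrides
	// for the connected server version; queryNamespaceMappings below relies on them.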

	err = querySettings(ch, db)
	if !c.Check(err, Equals, nil) {
		fmt.Println("## ERRORS FOUND")
		fmt.Println(err)
	}

	// This should never happen in our test cases.
	errMap := queryNamespaceMappings(ch, db, s.e.metricMap, s.e.queryOverrides)
	if !c.Check(len(errMap), Equals, 0) {
		fmt.Println("## NAMESPACE ERRORS FOUND")
		for namespace, err := range errMap {
			fmt.Println(namespace, ":", err)
		}
	}
}

// TestInvalidDsnDoesntCrash tests that specifying an invalid DSN doesn't crash
// the exporter. Related to https://github.com/wrouesnel/postgres_exporter/issues/93
// although not a replication of the scenario.
func (s *IntegrationSuite) TestInvalidDsnDoesntCrash(c *C) {
	// Setup a dummy channel to consume metrics
	ch := make(chan prometheus.Metric, 100)
	go func() {
		for range ch {
		}
	}()

	// Send a bad DSN
	exporter := NewExporter("invalid dsn", false, *queriesPath)
	c.Assert(exporter, NotNil)
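	// scrape must handle the connection failure internally rather than panicking.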
	exporter.scrape(ch)

	// Send a DSN to a non-listening port.
	exporter = NewExporter("postgresql://nothing:nothing@127.0.0.1:1/nothing", false, *queriesPath)
	c.Assert(exporter, NotNil)
	exporter.scrape(ch)
}

// TestUnknownMetricParsingDoesntCrash deliberately deletes all the column maps out
// of an exporter to test that the default metric handling code can cope with unknown columns.
func (s *IntegrationSuite) TestUnknownMetricParsingDoesntCrash(c *C) {
	// Setup a dummy channel to consume metrics
	ch := make(chan prometheus.Metric, 100)
	go func() {
		for range ch {
		}
	}()

	dsn := os.Getenv("DATA_SOURCE_NAME")
	c.Assert(dsn, Not(Equals), "")

	exporter := NewExporter(dsn, false, "")
	c.Assert(exporter, NotNil)

	// Replace the default metric maps with empty maps for every namespace.
	emptyMaps := make(map[string]map[string]ColumnMapping, 0)
	for k := range exporter.builtinMetricMaps {
		emptyMaps[k] = map[string]ColumnMapping{}
	}
	exporter.builtinMetricMaps = emptyMaps

	// Scrape the exporter and make sure it doesn't panic on the unknown columns.
	exporter.scrape(ch)
}