File: migrate.bs

package info (click to toggle)
storm-lang 0.7.4-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 52,004 kB
  • sloc: ansic: 261,462; cpp: 140,405; sh: 14,891; perl: 9,846; python: 2,525; lisp: 2,504; asm: 860; makefile: 678; pascal: 70; java: 52; xml: 37; awk: 12
file content (228 lines) | stat: -rw-r--r-- 6,954 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
/**
 * Options when migrating a database.
 *
 * This is a bitmask enum: values may be combined with bitwise OR, and individual flags are
 * queried with 'has' (as done in verifyDatabaseSchema and verifyTable below).
 */
enum MigrationPolicy : bitmask {
	// No migration at all, just quit if the structure does not match.
	none = 0x0,

	// Default policy - do non-destructive migrations automatically.
	default = 0x1,

	// Allow removal of columns. Without this flag, a column that exists in the database but not
	// in the schema causes a SchemaError rather than being dropped.
	allowRemoval,

	// Allow columns to be in a different order. The database DSL *should* not care about the order,
	// but this has not currently been thoroughly verified.
	allowReorder,
}

// Check so that the database provided by the connection 'db' has the structure described by
// 'contents'. If this is not the case, it attempts to migrate the database to match the
// expectations. Data is migrated according to the supplied policy.
//
// Tables in 'contents' that are missing from the database are created outright; tables that
// exist in both are diffed column-by-column (verifyTable) and index-by-index (verifyIndices).
// All required steps are collected into a single Migration object, which is applied at the end
// only if it actually contains any work.
//
// Throws SchemaError (from verifyTable) if differences exist that the policy does not allow
// to be migrated automatically.
void verifyDatabaseSchema(DBConnection db, Database contents, MigrationPolicy policy) {
	Migration migration;
	// Does this backend treat a suitable primary key as auto-incrementing implicitly?
	Bool implicitAutoIncrement = db.features.has(DBFeatures:implicitAutoIncrement);

	for (table in contents.tables) {
		// Accommodate for implicit autoincrement here already, this makes it easier to understand
		// error messages, and simplifies the rest of the implementation.
		if (!implicitAutoIncrement) {
			// An index at or past 'columns.count' means no such column exists.
			Nat autoCol = table.implicitAutoIncrementColumn();
			if (autoCol < table.columns.count) {
				// Clone before mutating, so the caller's schema description is left untouched.
				table = table.clone();
				table.columns[autoCol].autoIncrement = true;
			}
		}

		if (schema = db.schema(table.name)) {
			// Modify the table and any associated indices.
			verifyTable(schema, table, policy, migration);
			verifyIndices(table.name, schema.indices, table.indices, migration);
		} else {
			// Create the table and associated indices.
			migration.tableAdd << table.toSchema();
		}
	}

	// TODO: Should we remove tables if allowRemoval is in the policy?

	// Only bother the backend if there is actually something to do.
	if (migration.any) {
		db.migrate(migration);
	}
}


// Check a particular table for differences and add required migration steps to the migration object.
//
// Matches columns in the database ('current') against columns in the schema ('desired') by
// name. Columns missing from the schema are removed (only when 'allowRemoval' is in the
// policy, otherwise a SchemaError is thrown), columns missing from the database are added, and
// columns present in both are checked for compatible types and attributes (verifyColumn).
// Unless 'allowReorder' is in the policy, columns must also appear in the same positions in
// both. Finally, the primary key sets are compared and scheduled for re-creation if they
// differ. All generated steps are appended to 'migration'.
//
// Throws SchemaError when a difference can not (or, per the policy, may not) be migrated.
//
// Fix: the error messages previously spelled "discrepancy" as "disrepancy".
private void verifyTable(Schema current, Table desired, MigrationPolicy policy, Migration migration) {
	Str table = desired.name;

	// Which columns in 'desired' are found already?
	Bool[] found = Bool[](desired.columns.count, false);

	// Current column ID in the modified version of the table.
	Nat updatedColId = 0;

	// Table migration.
	Migration:Table out(table);

	// Go through columns and validate existing ones:
	for (currentCol in current) {
		// Go through 'desired' to find the first column that is not found previously and that
		// matches what we are looking for. 'found.count' acts as a "not found" sentinel.
		Nat desiredId = found.count;
		for (i, desiredCol in desired.columns) {
			if (found[i])
				continue;
			if (desiredCol.name == currentCol.name) {
				found[i] = true;
				desiredId = i;
				break;
			}
		}

		// If we did not find one, it means that the column was removed.
		if (desiredId >= found.count) {
			if (policy.has(MigrationPolicy:allowRemoval))
				out.colRemove << currentCol.name;
			else
				throw SchemaError("The column ${table}.${currentCol.name} does not exist in the schema."
								+ " It will not be automatically removed unless 'allowRemoval' is specified.",
								desired, current);
			continue;
		}

		// Unless 'allowReorder' was specified, we need the columns to be in the right order.
		if (!policy.has(MigrationPolicy:allowReorder)) {
			if (updatedColId != desiredId)
				throw SchemaError("The column ${table}.${currentCol.name} is in position ${updatedColId} in "
								+ "the database, but at ${desiredId} in the schema. Use 'allowReorder' to "
								+ "accept this discrepancy.", desired, current);
		}

		// We found a column, check if we need to do something with it.
		Column desiredCol = desired.columns[desiredId];
		if (!verifyColumn(currentCol, desiredCol, out))
			throw SchemaError("The column ${table}.${currentCol.name} has an incompatible type in the database "
							+ "and in the schema.", desired, current);
		updatedColId++;
	}

	// See if we need to add anything.
	for (desiredId, desiredCol in desired.columns) {
		if (found[desiredId])
			continue;

		if (!policy.has(MigrationPolicy:allowReorder)) {
			if (updatedColId != desiredId)
				throw SchemaError("The column ${table}.${desiredCol.name} is in position ${updatedColId} in "
								+ "the database, but at ${desiredId} in the schema. Use 'allowReorder' to "
								+ "accept this discrepancy.", desired, current);
		}

		out.colAdd << desiredCol.toSchema();
		updatedColId++;
	}

	// Compute the set of primary keys for both tables and see if they differ.
	{
		// Names of the columns that make up the primary key according to the schema.
		Str[] desiredPK;
		for (desiredCol in desired.columns)
			if (desiredCol.primaryKey)
				desiredPK << desiredCol.name;

		// How many of the database's current key columns were also found in 'desiredPK'.
		Nat foundCount = 0;
		// Does the database currently have any primary key at all?
		Bool currentPK = false;
		for (currentCol in current) {
			if (!currentCol.attributes.has(Schema:Attributes:primaryKey))
				continue;

			currentPK = true;

			// Note: Two columns can not have the same name, so we don't have to check for that here.
			Bool found = false;
			for (i, x in desiredPK) {
				if (currentCol.name == x) {
					foundCount++;
					found = true;
					break;
				}
			}

			// A key column in the database that is not a key column in the schema: re-create keys.
			if (!found) {
				out.updatePrimaryKeys = true;
				break;
			}
		}

		// Fewer matches than desired: some desired key column is not currently a key column.
		if (foundCount != desiredPK.count)
			out.updatePrimaryKeys = true;

		if (out.updatePrimaryKeys) {
			// Only drop the existing key if one is actually present.
			out.dropPrimaryKeys = currentPK;
			out.primaryKeys = desiredPK;
		}
	}

	// Only record the table migration if anything actually changed.
	if (out.any)
		migration.tableMigrate << out;
}

// Check properties on columns, and migrate if necessary. Note that primary keys need to be handled
// separately due to how SQL works (we need to know the full set of keys that are a part of the
// primary key to change it).
//
// Returns false if the column types are incompatible (the caller reports the error); returns
// true otherwise, after appending an attribute/default-value update to 'migration' if needed.
private Bool verifyColumn(Schema:Column current, Column desired, Migration:Table migration) {
	// Incompatible base types can not be migrated in place; let the caller throw.
	if (!desired.datatype.sqlType.compatible(current.type))
		return false;

	var desiredSchema = desired.toSchema();

	// Compare attributes and default values, ignoring the primary key flag since primary keys
	// are migrated for the table as a whole (see verifyTable).
	Migration:ColAttrs update(desired.name, desired.datatype.sqlType);
	update.currentAttributes = current.attributes - Schema:Attributes:primaryKey;
	update.desiredAttributes = desiredSchema.attributes - Schema:Attributes:primaryKey;
	update.currentDefault = current.default;
	update.desiredDefault = desiredSchema.default;

	// Only record the step if something actually differs.
	if (update.any)
		migration.colMigrate << update;

	// The column was compatible. The final expression is the function's return value.
	true;
}

// Check indices in a particular table and update as required.
//
// Indices are compared by a string fingerprint of their name and column list (see hashIndex).
// Indices present in the database but not in the schema are removed; indices in the schema but
// not in the database are created. An index whose definition changed thus appears in both sets
// and is dropped and re-created. Note that index removal happens regardless of MigrationPolicy.
private void verifyIndices(Str table, Schema:Index[] current, Index[] desired, Migration migration) {
	// Fingerprints of the indices currently in the database.
	Set<Str> currentSet;
	for (x in current)
		currentSet.put(hashIndex(x));
	// Fingerprints of the indices requested by the schema.
	Set<Str> desiredSet;
	for (x in desired)
		desiredSet.put(hashIndex(x));

	// What to remove?
	for (x in current) {
		if (desiredSet.has(hashIndex(x)))
			continue;

		// The column list is not needed to remove an index, only its name.
		migration.indexRemove << Migration:Index(table, x.name, []);
	}

	// What to add?
	for (x in desired) {
		if (currentSet.has(hashIndex(x)))
			continue;

		migration.indexAdd << Migration:Index(table, x.toSchema());
	}
}


// Produce a "hash" of an index: its name, a '|' separator, and the comma-joined column list.
// Used by verifyIndices as a fingerprint to compare database indices with schema indices.
private Str hashIndex(Index i) {
	StrBuf out;
	out << i.name;
	out << "|";
	out << join(i.columns, ",");
	out.toS;
}
// Fingerprint overload for indices read back from the database; must produce the exact same
// format as the Index overload above so the two are comparable.
private Str hashIndex(Schema:Index i) {
	StrBuf out;
	out << i.name;
	out << "|";
	out << join(i.columns, ",");
	out.toS;
}