File: copier.go

package copier

import (
	"archive/tar"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"os/user"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/containers/buildah/util"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/storage/pkg/fileutils"
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/reexec"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (
	copierCommand    = "buildah-copier"
	maxLoopsFollowed = 64
	// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06, from archive/tar
	cISUID = 04000 // Set uid, from archive/tar
	cISGID = 02000 // Set gid, from archive/tar
	cISVTX = 01000 // Save text (sticky bit), from archive/tar
)

func init() {
	reexec.Register(copierCommand, copierMain)
	// Attempt a user and host lookup to force libc (glibc, and possibly others that use dynamic
	// modules to handle looking up user and host information) to load modules that match the libc
	// our binary is currently using.  Hopefully they're loaded on first use, so that they won't
	// need to be loaded after we've chrooted into the rootfs, which could include modules that
	// don't match our libc and which can't be loaded, or modules which we don't want to execute
	// because we don't trust their code.
	_, _ = user.Lookup("buildah")
	_, _ = net.LookupHost("localhost")
}

// isArchivePath returns true if the specified path can be read like a (possibly
// compressed) tarball.
func isArchivePath(path string) bool {
	f, err := os.Open(path)
	if err != nil {
		return false
	}
	defer f.Close()
	rc, _, err := compression.AutoDecompress(f)
	if err != nil {
		return false
	}
	defer rc.Close()
	tr := tar.NewReader(rc)
	_, err = tr.Next()
	return err == nil
}

// requestType encodes exactly what kind of request this is.
type requestType string

const (
	requestStat  requestType = "STAT"
	requestGet   requestType = "GET"
	requestPut   requestType = "PUT"
	requestMkdir requestType = "MKDIR"
	requestQuit  requestType = "QUIT"
)

// request encodes a single request.
type request struct {
	Request            requestType
	Root               string // used by all requests
	preservedRoot      string
	rootPrefix         string // used to reconstruct paths being handed back to the caller
	Directory          string // used by all requests
	preservedDirectory string
	Globs              []string `json:",omitempty"` // used by stat, get
	preservedGlobs     []string
	StatOptions        StatOptions  `json:",omitempty"`
	GetOptions         GetOptions   `json:",omitempty"`
	PutOptions         PutOptions   `json:",omitempty"`
	MkdirOptions       MkdirOptions `json:",omitempty"`
}

func (req *request) Excludes() []string {
	switch req.Request {
	case requestStat:
		return req.StatOptions.Excludes
	case requestGet:
		return req.GetOptions.Excludes
	case requestPut:
		return nil
	case requestMkdir:
		return nil
	case requestQuit:
		return nil
	default:
		panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
	}
}

func (req *request) UIDMap() []idtools.IDMap {
	switch req.Request {
	case requestStat:
		return nil
	case requestGet:
		return req.GetOptions.UIDMap
	case requestPut:
		return req.PutOptions.UIDMap
	case requestMkdir:
		return req.MkdirOptions.UIDMap
	case requestQuit:
		return nil
	default:
		panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
	}
}

func (req *request) GIDMap() []idtools.IDMap {
	switch req.Request {
	case requestStat:
		return nil
	case requestGet:
		return req.GetOptions.GIDMap
	case requestPut:
		return req.PutOptions.GIDMap
	case requestMkdir:
		return req.MkdirOptions.GIDMap
	case requestQuit:
		return nil
	default:
		panic(fmt.Sprintf("not an implemented request type: %q", req.Request))
	}
}

// response encodes a single response.
type response struct {
	Error string `json:",omitempty"`
	Stat  statResponse
	Get   getResponse
	Put   putResponse
	Mkdir mkdirResponse
}

// statResponse encodes a response for a single Stat request.
type statResponse struct {
	Globs []*StatsForGlob
}

// StatsForGlob encodes results for a single glob pattern passed to Stat().
type StatsForGlob struct {
	Error   string                  `json:",omitempty"` // error if the Glob pattern was malformed
	Glob    string                  // input pattern to which this result corresponds
	Globbed []string                // a slice of zero or more names that match the glob
	Results map[string]*StatForItem // one for each Globbed value if there are any, or for Glob
}

// StatForItem encodes results for a single filesystem item, as returned by Stat().
type StatForItem struct {
	Error           string `json:",omitempty"`
	Name            string
	Size            int64       // dereferenced value for symlinks
	Mode            os.FileMode // dereferenced value for symlinks
	ModTime         time.Time   // dereferenced value for symlinks
	IsSymlink       bool
	IsDir           bool   // dereferenced value for symlinks
	IsRegular       bool   // dereferenced value for symlinks
	IsArchive       bool   // dereferenced value for symlinks
	ImmediateTarget string `json:",omitempty"` // raw link content
}

// getResponse encodes a response for a single Get request.
type getResponse struct {
}

// putResponse encodes a response for a single Put request.
type putResponse struct {
}

// mkdirResponse encodes a response for a single Mkdir request.
type mkdirResponse struct {
}

// StatOptions controls parts of Stat()'s behavior.
type StatOptions struct {
	CheckForArchives bool     // check for and populate the IsArchive bit in returned values
	Excludes         []string // contents to pretend don't exist, using the OS-specific path separator
}

// Stat globs the specified pattern in the specified directory and returns its
// results.
// If neither root nor directory is specified, the current root directory is
// used, and relative names in the globs list are treated as being relative to
// the current working directory.
// If root is specified and the current OS supports it, and the calling process
// has the necessary privileges, the stat() is performed in a chrooted context.
// If the directory is specified as an absolute path, it should either be the
// root directory or a subdirectory of the root directory.  Otherwise, the
// directory is treated as a path relative to the root directory.
// Relative names in the glob list are treated as being relative to the
// directory.
func Stat(root string, directory string, options StatOptions, globs []string) ([]*StatsForGlob, error) {
	req := request{
		Request:     requestStat,
		Root:        root,
		Directory:   directory,
		Globs:       append([]string{}, globs...),
		StatOptions: options,
	}
	resp, err := copier(nil, nil, req)
	if err != nil {
		return nil, err
	}
	if resp.Error != "" {
		return nil, errors.New(resp.Error)
	}
	return resp.Stat.Globs, nil
}
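
// A minimal caller sketch, not part of the original file: the rootfs path and
// glob below are illustrative assumptions.  The relative glob is evaluated
// relative to the (here unspecified) directory, which defaults to root.
//
//	stats, err := copier.Stat("/var/lib/mycontainer/rootfs", "",
//		copier.StatOptions{CheckForArchives: true}, []string{"etc/*.conf"})
//	if err != nil {
//		return err
//	}
//	for _, perGlob := range stats {
//		for name, item := range perGlob.Results {
//			fmt.Printf("%s: %d bytes, dir=%v, symlink=%v\n", name, item.Size, item.IsDir, item.IsSymlink)
//		}
//	}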

// GetOptions controls parts of Get()'s behavior.
type GetOptions struct {
	UIDMap, GIDMap     []idtools.IDMap   // map from hostIDs to containerIDs in the output archive
	Excludes           []string          // contents to pretend don't exist, using the OS-specific path separator
	ExpandArchives     bool              // extract the contents of named items that are archives
	ChownDirs          *idtools.IDPair   // set ownership on directories. no effect on archives being extracted
	ChmodDirs          *os.FileMode      // set permissions on directories. no effect on archives being extracted
	ChownFiles         *idtools.IDPair   // set ownership of files. no effect on archives being extracted
	ChmodFiles         *os.FileMode      // set permissions on files. no effect on archives being extracted
	StripSetuidBit     bool              // strip the setuid bit off of items being copied. no effect on archives being extracted
	StripSetgidBit     bool              // strip the setgid bit off of items being copied. no effect on archives being extracted
	StripStickyBit     bool              // strip the sticky bit off of items being copied. no effect on archives being extracted
	StripXattrs        bool              // don't record extended attributes of items being copied. no effect on archives being extracted
	KeepDirectoryNames bool              // don't strip the top directory's basename from the paths of items in subdirectories
	Rename             map[string]string // rename items with the specified names, or under the specified names
}

// Get produces an archive containing items that match the specified glob
// patterns and writes it to bulkWriter.
// If neither root nor directory is specified, the current root directory is
// used, and relative names in the globs list are treated as being relative to
// the current working directory.
// If root is specified and the current OS supports it, and the calling process
// has the necessary privileges, the contents are read in a chrooted context.
// If the directory is specified as an absolute path, it should either be the
// root directory or a subdirectory of the root directory.  Otherwise, the
// directory is treated as a path relative to the root directory.
// Relative names in the glob list are treated as being relative to the
// directory.
func Get(root string, directory string, options GetOptions, globs []string, bulkWriter io.Writer) error {
	req := request{
		Request:   requestGet,
		Root:      root,
		Directory: directory,
		Globs:     append([]string{}, globs...),
		StatOptions: StatOptions{
			CheckForArchives: options.ExpandArchives,
		},
		GetOptions: options,
	}
	resp, err := copier(nil, bulkWriter, req)
	if err != nil {
		return err
	}
	if resp.Error != "" {
		return errors.New(resp.Error)
	}
	return nil
}
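
// A hedged usage sketch, not from the original file: the rootfs path, glob,
// and output filename are assumptions.  The resulting stream is a plain tar
// archive written to bulkWriter.
//
//	f, err := os.Create("/tmp/etc.tar")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	err = copier.Get("/var/lib/mycontainer/rootfs", "",
//		copier.GetOptions{KeepDirectoryNames: true}, []string{"etc"}, f)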

// PutOptions controls parts of Put()'s behavior.
type PutOptions struct {
	UIDMap, GIDMap       []idtools.IDMap   // map from containerIDs to hostIDs when writing contents to disk
	DefaultDirOwner      *idtools.IDPair   // set ownership of implicitly-created directories, default is ChownDirs, or 0:0 if ChownDirs not set
	DefaultDirMode       *os.FileMode      // set permissions on implicitly-created directories, default is ChmodDirs, or 0755 if ChmodDirs not set
	ChownDirs            *idtools.IDPair   // set ownership of newly-created directories
	ChmodDirs            *os.FileMode      // set permissions on newly-created directories
	ChownFiles           *idtools.IDPair   // set ownership of newly-created files
	ChmodFiles           *os.FileMode      // set permissions on newly-created files
	StripXattrs          bool              // don't bother trying to set extended attributes of items being copied
	IgnoreXattrErrors    bool              // ignore any errors encountered when attempting to set extended attributes
	IgnoreDevices        bool              // ignore items which are character or block devices
	NoOverwriteDirNonDir bool              // instead of quietly overwriting directories with non-directories, return an error
	Rename               map[string]string // rename items with the specified names, or under the specified names
}

// Put extracts an archive from the bulkReader at the specified directory.
// If neither root nor directory is specified, the current root directory is
// used.
// If root is specified and the current OS supports it, and the calling process
// has the necessary privileges, the contents are written in a chrooted
// context.  If the directory is specified as an absolute path, it should
// either be the root directory or a subdirectory of the root directory.
// Otherwise, the directory is treated as a path relative to the root
// directory.
func Put(root string, directory string, options PutOptions, bulkReader io.Reader) error {
	req := request{
		Request:    requestPut,
		Root:       root,
		Directory:  directory,
		PutOptions: options,
	}
	resp, err := copier(bulkReader, nil, req)
	if err != nil {
		return err
	}
	if resp.Error != "" {
		return errors.New(resp.Error)
	}
	return nil
}
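
// A hedged usage sketch, not from the original file: the rootfs path, archive
// name, and target subdirectory are assumptions.  The relative directory is
// created under root if it doesn't already exist.
//
//	f, err := os.Open("/tmp/etc.tar")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	err = copier.Put("/var/lib/mycontainer/rootfs", "srv/data",
//		copier.PutOptions{NoOverwriteDirNonDir: true}, f)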

// MkdirOptions controls parts of Mkdir()'s behavior.
type MkdirOptions struct {
	UIDMap, GIDMap []idtools.IDMap // map from containerIDs to hostIDs when creating directories
	ChownNew       *idtools.IDPair // set ownership of newly-created directories
	ChmodNew       *os.FileMode    // set permissions on newly-created directories
}

// Mkdir ensures that the specified directory exists.  Any directories which
// need to be created will be given the specified ownership and permissions.
// If neither root nor directory is specified, the current root directory is
// used.
// If root is specified and the current OS supports it, and the calling process
// has the necessary privileges, the directory is created in a chrooted
// context.  If the directory is specified as an absolute path, it should
// either be the root directory or a subdirectory of the root directory.
// Otherwise, the directory is treated as a path relative to the root
// directory.
func Mkdir(root string, directory string, options MkdirOptions) error {
	req := request{
		Request:      requestMkdir,
		Root:         root,
		Directory:    directory,
		MkdirOptions: options,
	}
	resp, err := copier(nil, nil, req)
	if err != nil {
		return err
	}
	if resp.Error != "" {
		return errors.New(resp.Error)
	}
	return nil
}
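
// A hedged usage sketch, not from the original file: the rootfs path and
// subdirectory are assumptions.
//
//	mode := os.FileMode(0755)
//	err := copier.Mkdir("/var/lib/mycontainer/rootfs", "var/tmp/build",
//		copier.MkdirOptions{ChmodNew: &mode})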

// cleanerReldirectory lexically resolves the relative path candidate, attempting
// to ensure that when joined as a subdirectory of another directory, it does
// not reference anything outside of that other directory.
func cleanerReldirectory(candidate string) string {
	cleaned := strings.TrimPrefix(filepath.Clean(string(os.PathSeparator)+candidate), string(os.PathSeparator))
	if cleaned == "" {
		return "."
	}
	return cleaned
}
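
// For illustration, the lexical cleaning above behaves as follows (the inputs
// are hypothetical):
//
//	cleanerReldirectory("a/../b")    == "b"
//	cleanerReldirectory("../../etc") == "etc"
//	cleanerReldirectory("")          == "."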

// convertToRelSubdirectory returns the path of directory, bound and relative to
// root, as a relative path, or an error if that path can't be computed or if
// the two directories are on different volumes
func convertToRelSubdirectory(root, directory string) (relative string, err error) {
	if root == "" || !filepath.IsAbs(root) {
		return "", errors.Errorf("expected root directory to be an absolute path, got %q", root)
	}
	if directory == "" || !filepath.IsAbs(directory) {
		return "", errors.Errorf("expected directory to be an absolute path, got %q", root)
	}
	if filepath.VolumeName(root) != filepath.VolumeName(directory) {
		return "", errors.Errorf("%q and %q are on different volumes", root, directory)
	}
	rel, err := filepath.Rel(root, directory)
	if err != nil {
		return "", errors.Wrapf(err, "error computing path of %q relative to %q", directory, root)
	}
	return cleanerReldirectory(rel), nil
}
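
// For illustration, assuming Unix-style paths (the inputs are hypothetical):
//
//	convertToRelSubdirectory("/rootfs", "/rootfs/usr/bin") == "usr/bin"
//	convertToRelSubdirectory("/rootfs", "/etc")            == "etc" // ".." components are clipped at the root
//	convertToRelSubdirectory("rootfs", "/etc")             // error: root must be absolute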

func currentVolumeRoot() (string, error) {
	cwd, err := os.Getwd()
	if err != nil {
		return "", errors.Wrapf(err, "error getting current working directory")
	}
	return filepath.VolumeName(cwd) + string(os.PathSeparator), nil
}

func isVolumeRoot(candidate string) (bool, error) {
	abs, err := filepath.Abs(candidate)
	if err != nil {
		return false, errors.Wrapf(err, "error converting %q to an absolute path", candidate)
	}
	return abs == filepath.VolumeName(abs)+string(os.PathSeparator), nil
}

func looksLikeAbs(candidate string) bool {
	return candidate[0] == os.PathSeparator && (len(candidate) == 1 || candidate[1] != os.PathSeparator)
}

func copier(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
	if req.Directory == "" {
		if req.Root == "" {
			wd, err := os.Getwd()
			if err != nil {
				return nil, errors.Wrapf(err, "error getting current working directory")
			}
			req.Directory = wd
		} else {
			req.Directory = req.Root
		}
	}
	if req.Root == "" {
		root, err := currentVolumeRoot()
		if err != nil {
			return nil, errors.Wrapf(err, "error determining root of current volume")
		}
		req.Root = root
	}
	if filepath.IsAbs(req.Directory) {
		_, err := convertToRelSubdirectory(req.Root, req.Directory)
		if err != nil {
			return nil, errors.Wrapf(err, "error rewriting %q to be relative to %q", req.Directory, req.Root)
		}
	}
	isAlreadyRoot, err := isVolumeRoot(req.Root)
	if err != nil {
		return nil, errors.Wrapf(err, "error checking if %q is a root directory", req.Root)
	}
	if !isAlreadyRoot && canChroot {
		return copierWithSubprocess(bulkReader, bulkWriter, req)
	}
	return copierWithoutSubprocess(bulkReader, bulkWriter, req)
}

func copierWithoutSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, error) {
	req.preservedRoot = req.Root
	req.rootPrefix = string(os.PathSeparator)
	req.preservedDirectory = req.Directory
	req.preservedGlobs = append([]string{}, req.Globs...)
	if !filepath.IsAbs(req.Directory) {
		req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
	}
	absoluteGlobs := make([]string, 0, len(req.Globs))
	for _, glob := range req.preservedGlobs {
		if filepath.IsAbs(glob) {
			relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
			if err != nil {
				fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err)
				os.Exit(1)
			}
			absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Root, string(os.PathSeparator)+relativeGlob))
		} else {
			absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(glob)))
		}
	}
	req.Globs = absoluteGlobs
	resp, cb, err := copierHandler(bulkReader, bulkWriter, req)
	if err != nil {
		return nil, err
	}
	if cb != nil {
		if err = cb(); err != nil {
			return nil, err
		}
	}
	return resp, nil
}

func closeIfNotNilYet(f **os.File, what string) {
	if f != nil && *f != nil {
		err := (*f).Close()
		*f = nil
		if err != nil {
			logrus.Debugf("error closing %s: %v", what, err)
		}
	}
}

func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req request) (resp *response, err error) {
	if bulkReader == nil {
		bulkReader = bytes.NewReader([]byte{})
	}
	if bulkWriter == nil {
		bulkWriter = ioutil.Discard
	}
	cmd := reexec.Command(copierCommand)
	stdinRead, stdinWrite, err := os.Pipe()
	if err != nil {
		return nil, errors.Wrapf(err, "pipe")
	}
	defer closeIfNotNilYet(&stdinRead, "stdin pipe reader")
	defer closeIfNotNilYet(&stdinWrite, "stdin pipe writer")
	encoder := json.NewEncoder(stdinWrite)
	stdoutRead, stdoutWrite, err := os.Pipe()
	if err != nil {
		return nil, errors.Wrapf(err, "pipe")
	}
	defer closeIfNotNilYet(&stdoutRead, "stdout pipe reader")
	defer closeIfNotNilYet(&stdoutWrite, "stdout pipe writer")
	decoder := json.NewDecoder(stdoutRead)
	bulkReaderRead, bulkReaderWrite, err := os.Pipe()
	if err != nil {
		return nil, errors.Wrapf(err, "pipe")
	}
	defer closeIfNotNilYet(&bulkReaderRead, "child bulk content reader pipe, read end")
	defer closeIfNotNilYet(&bulkReaderWrite, "child bulk content reader pipe, write end")
	bulkWriterRead, bulkWriterWrite, err := os.Pipe()
	if err != nil {
		return nil, errors.Wrapf(err, "pipe")
	}
	defer closeIfNotNilYet(&bulkWriterRead, "child bulk content writer pipe, read end")
	defer closeIfNotNilYet(&bulkWriterWrite, "child bulk content writer pipe, write end")
	cmd.Dir = "/"
	cmd.Env = append([]string{fmt.Sprintf("LOGLEVEL=%d", logrus.GetLevel())}, os.Environ()...)

	errorBuffer := bytes.Buffer{}
	cmd.Stdin = stdinRead
	cmd.Stdout = stdoutWrite
	cmd.Stderr = &errorBuffer
	cmd.ExtraFiles = []*os.File{bulkReaderRead, bulkWriterWrite}
	if err = cmd.Start(); err != nil {
		return nil, errors.Wrapf(err, "error starting subprocess")
	}
	cmdToWaitFor := cmd
	defer func() {
		if cmdToWaitFor != nil {
			if err := cmdToWaitFor.Wait(); err != nil {
				if errorBuffer.String() != "" {
					logrus.Debug(errorBuffer.String())
				}
			}
		}
	}()
	stdinRead.Close()
	stdinRead = nil
	stdoutWrite.Close()
	stdoutWrite = nil
	bulkReaderRead.Close()
	bulkReaderRead = nil
	bulkWriterWrite.Close()
	bulkWriterWrite = nil
	killAndReturn := func(err error, step string) (*response, error) { // nolint: unparam
		if err2 := cmd.Process.Kill(); err2 != nil {
			return nil, errors.Wrapf(err, "error killing subprocess: %v; %s", err2, step)
		}
		return nil, errors.Wrap(err, step)
	}
	if err = encoder.Encode(req); err != nil {
		return killAndReturn(err, "error encoding request for copier subprocess")
	}
	if err = decoder.Decode(&resp); err != nil {
		return killAndReturn(err, "error decoding response from copier subprocess")
	}
	if err = encoder.Encode(&request{Request: requestQuit}); err != nil {
		return killAndReturn(err, "error encoding request for copier subprocess")
	}
	stdinWrite.Close()
	stdinWrite = nil
	stdoutRead.Close()
	stdoutRead = nil
	var wg sync.WaitGroup
	var readError, writeError error
	wg.Add(1)
	go func() {
		_, writeError = io.Copy(bulkWriter, bulkWriterRead)
		bulkWriterRead.Close()
		bulkWriterRead = nil
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		_, readError = io.Copy(bulkReaderWrite, bulkReader)
		bulkReaderWrite.Close()
		bulkReaderWrite = nil
		wg.Done()
	}()
	wg.Wait()
	cmdToWaitFor = nil
	if err = cmd.Wait(); err != nil {
		if errorBuffer.String() != "" {
			err = fmt.Errorf("%s", errorBuffer.String())
		}
		return nil, err
	}
	if cmd.ProcessState.Exited() && !cmd.ProcessState.Success() {
		err = fmt.Errorf("subprocess exited with error")
		if errorBuffer.String() != "" {
			err = fmt.Errorf("%s", errorBuffer.String())
		}
		return nil, err
	}
	loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n")
	if len(loggedOutput) > 0 {
		for _, output := range strings.Split(loggedOutput, "\n") {
			logrus.Debug(output)
		}
	}
	if readError != nil {
		return nil, errors.Wrapf(readError, "error passing bulk input to subprocess")
	}
	if writeError != nil {
		return nil, errors.Wrapf(writeError, "error passing bulk output from subprocess")
	}
	return resp, nil
}

func copierMain() {
	var chrooted bool
	decoder := json.NewDecoder(os.Stdin)
	encoder := json.NewEncoder(os.Stdout)
	previousRequestRoot := ""

	// Set logging.
	if level := os.Getenv("LOGLEVEL"); level != "" {
		if ll, err := strconv.Atoi(level); err == nil {
			logrus.SetLevel(logrus.Level(ll))
		}
	}

	// Set up descriptors for receiving and sending tarstreams.
	bulkReader := os.NewFile(3, "bulk-reader")
	bulkWriter := os.NewFile(4, "bulk-writer")

	for {
		// Read a request.
		req := new(request)
		if err := decoder.Decode(req); err != nil {
			fmt.Fprintf(os.Stderr, "error decoding request from copier parent process: %v", err)
			os.Exit(1)
		}
		if req.Request == requestQuit {
			// Making Quit a specific request means that we could
			// run Stat() at a caller's behest before using the
			// same process for Get() or Put().  Maybe later.
			break
		}

		// Multiple requests should list the same root, because we
		// can't un-chroot to chroot to some other location.
		if previousRequestRoot != "" {
			// Check that we got the same input value for
			// where-to-chroot-to.
			if req.Root != previousRequestRoot {
				fmt.Fprintf(os.Stderr, "error: can't change location of chroot from %q to %q", previousRequestRoot, req.Root)
				os.Exit(1)
			}
			previousRequestRoot = req.Root
		} else {
			// Figure out where to chroot to, if we weren't told.
			if req.Root == "" {
				root, err := currentVolumeRoot()
				if err != nil {
					fmt.Fprintf(os.Stderr, "error determining root of current volume: %v", err)
					os.Exit(1)
				}
				req.Root = root
			}
			// Change to the specified root directory.
			var err error
			chrooted, err = chroot(req.Root)
			if err != nil {
				fmt.Fprintf(os.Stderr, "error changing to intended-new-root directory %q: %v", req.Root, err)
				os.Exit(1)
			}
			// Remember the root we chrooted into so that any later
			// requests can be checked against it.
			previousRequestRoot = req.Root
		}

		req.preservedRoot = req.Root
		req.rootPrefix = string(os.PathSeparator)
		req.preservedDirectory = req.Directory
		req.preservedGlobs = append([]string{}, req.Globs...)
		if chrooted {
			// We'll need to adjust some things now that the root
			// directory isn't what it was.  Make the directory and
			// globs absolute paths for simplicity's sake.
			absoluteDirectory := req.Directory
			if !filepath.IsAbs(req.Directory) {
				absoluteDirectory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
			}
			relativeDirectory, err := convertToRelSubdirectory(req.preservedRoot, absoluteDirectory)
			if err != nil {
				fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", absoluteDirectory, req.preservedRoot, err)
				os.Exit(1)
			}
			req.Directory = filepath.Clean(string(os.PathSeparator) + relativeDirectory)
			absoluteGlobs := make([]string, 0, len(req.Globs))
			for i, glob := range req.preservedGlobs {
				if filepath.IsAbs(glob) {
					relativeGlob, err := convertToRelSubdirectory(req.preservedRoot, glob)
					if err != nil {
						fmt.Fprintf(os.Stderr, "error rewriting %q to be relative to %q: %v", glob, req.preservedRoot, err)
						os.Exit(1)
					}
					absoluteGlobs = append(absoluteGlobs, filepath.Clean(string(os.PathSeparator)+relativeGlob))
				} else {
					absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
				}
			}
			req.Globs = absoluteGlobs
			req.rootPrefix = req.Root
			req.Root = string(os.PathSeparator)
		} else {
			// Make the directory and globs absolute paths for
			// simplicity's sake.
			if !filepath.IsAbs(req.Directory) {
				req.Directory = filepath.Join(req.Root, cleanerReldirectory(req.Directory))
			}
			absoluteGlobs := make([]string, 0, len(req.Globs))
			for i, glob := range req.preservedGlobs {
				if filepath.IsAbs(glob) {
					absoluteGlobs = append(absoluteGlobs, req.Globs[i])
				} else {
					absoluteGlobs = append(absoluteGlobs, filepath.Join(req.Directory, cleanerReldirectory(req.Globs[i])))
				}
			}
			req.Globs = absoluteGlobs
		}
		resp, cb, err := copierHandler(bulkReader, bulkWriter, *req)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error handling request %#v from copier parent process: %v", *req, err)
			os.Exit(1)
		}
		// Encode the response.
		if err := encoder.Encode(resp); err != nil {
			fmt.Fprintf(os.Stderr, "error encoding response %#v for copier parent process: %v", *req, err)
			os.Exit(1)
		}
		// If there's bulk data to transfer, run the callback to either
		// read or write it.
		if cb != nil {
			if err = cb(); err != nil {
				fmt.Fprintf(os.Stderr, "error during bulk transfer for %#v: %v", *req, err)
				os.Exit(1)
			}
		}
	}
}

func copierHandler(bulkReader io.Reader, bulkWriter io.Writer, req request) (*response, func() error, error) {
	// NewPatternMatcher splits patterns into components using
	// os.PathSeparator, implying that it expects OS-specific naming
	// conventions.
	excludes := req.Excludes()
	pm, err := fileutils.NewPatternMatcher(excludes)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "error processing excludes list %v", excludes)
	}

	var idMappings *idtools.IDMappings
	uidMap, gidMap := req.UIDMap(), req.GIDMap()
	if len(uidMap) > 0 && len(gidMap) > 0 {
		idMappings = idtools.NewIDMappingsFromMaps(uidMap, gidMap)
	}

	switch req.Request {
	default:
		return nil, nil, errors.Errorf("not an implemented request type: %q", req.Request)
	case requestStat:
		resp := copierHandlerStat(req, pm)
		return resp, nil, nil
	case requestGet:
		return copierHandlerGet(bulkWriter, req, pm, idMappings)
	case requestPut:
		return copierHandlerPut(bulkReader, req, idMappings)
	case requestMkdir:
		return copierHandlerMkdir(req, idMappings)
	case requestQuit:
		return nil, nil, nil
	}
}

// pathIsExcluded computes path relative to root, then asks the pattern matcher
// if the result is excluded.  Returns the relative path and the matcher's
// results.
func pathIsExcluded(root, path string, pm *fileutils.PatternMatcher) (string, bool, error) {
	rel, err := convertToRelSubdirectory(root, path)
	if err != nil {
		return "", false, errors.Wrapf(err, "copier: error computing path of %q relative to root %q", path, root)
	}
	if pm == nil {
		return rel, false, nil
	}
	if rel == "." {
		// special case
		return rel, false, nil
	}
	// Matches uses filepath.FromSlash() to convert candidates before
	// checking if they match the patterns it's been given, implying that
	// it expects Unix-style paths.
	matches, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
	if err != nil {
		return rel, false, errors.Wrapf(err, "copier: error checking if %q is excluded", rel)
	}
	if matches {
		return rel, true, nil
	}
	return rel, false, nil
}

// resolvePath resolves symbolic links in paths, treating the specified
// directory as the root.
// Resolving the path this way, and using the result, is in no way secure
// against another process manipulating the content that we're looking at, and
// it is not expected to be.
// This helps us approximate chrooted behavior on systems and in test cases
// where chroot isn't available.
func resolvePath(root, path string, pm *fileutils.PatternMatcher) (string, error) {
	rel, err := convertToRelSubdirectory(root, path)
	if err != nil {
		return "", errors.Errorf("error making path %q relative to %q", path, root)
	}
	workingPath := root
	followed := 0
	components := strings.Split(rel, string(os.PathSeparator))
	excluded := false
	for len(components) > 0 {
		// if anything we try to examine is excluded, then resolution has to "break"
		_, thisExcluded, err := pathIsExcluded(root, filepath.Join(workingPath, components[0]), pm)
		if err != nil {
			return "", err
		}
		excluded = excluded || thisExcluded
		if !excluded {
			if target, err := os.Readlink(filepath.Join(workingPath, components[0])); err == nil {
				followed++
				if followed > maxLoopsFollowed {
					return "", &os.PathError{
						Op:   "open",
						Path: path,
						Err:  syscall.ELOOP,
					}
				}
				if filepath.IsAbs(target) || looksLikeAbs(target) {
					// symlink to an absolute path - prepend the
					// root directory to that absolute path to
					// replace the current location, and resolve
					// the remaining components
					workingPath = root
					components = append(strings.Split(target, string(os.PathSeparator)), components[1:]...)
					continue
				}
				// symlink to a relative path - add the link target to
				// the current location to get the next location, and
				// resolve the remaining components
				rel, err := convertToRelSubdirectory(root, filepath.Join(workingPath, target))
				if err != nil {
					return "", errors.Errorf("error making path %q relative to %q", filepath.Join(workingPath, target), root)
				}
				workingPath = root
				components = append(strings.Split(filepath.Clean(string(os.PathSeparator)+rel), string(os.PathSeparator)), components[1:]...)
				continue
			}
		}
		// append the current component's name to get the next location
		workingPath = filepath.Join(workingPath, components[0])
		if workingPath == filepath.Join(root, "..") {
			// attempted to go above the root using a relative path .., scope it
			workingPath = root
		}
		// ready to handle the next component
		components = components[1:]
	}
	return workingPath, nil
}
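
// For illustration (a hypothetical layout): with root "/rootfs", if
// "/rootfs/a" is a symlink whose target is the absolute path "/c", then
// resolvePath("/rootfs", "/rootfs/a/b", nil) returns "/rootfs/c/b" -- the
// absolute link target is reinterpreted as being relative to root rather than
// to the host's "/", and nothing is excluded because pm is nil.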

func copierHandlerStat(req request, pm *fileutils.PatternMatcher) *response {
	errorResponse := func(fmtspec string, args ...interface{}) *response {
		return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse{}}
	}
	if len(req.Globs) == 0 {
		return errorResponse("copier: stat: expected at least one glob pattern, got none")
	}
	var stats []*StatsForGlob
	for i, glob := range req.Globs {
		s := StatsForGlob{
			Glob: req.preservedGlobs[i],
		}
		stats = append(stats, &s)
		// glob this pattern
		globMatched, err := filepath.Glob(glob)
		if err != nil {
			s.Error = fmt.Sprintf("copier: stat: %q while matching glob pattern %q", err.Error(), glob)
			continue
		}
		// collect the matches
		s.Globbed = make([]string, 0, len(globMatched))
		s.Results = make(map[string]*StatForItem)
		for _, globbed := range globMatched {
			rel, excluded, err := pathIsExcluded(req.Root, globbed, pm)
			if err != nil {
				return errorResponse("copier: stat: %v", err)
			}
			if excluded {
				continue
			}
			// if the glob was an absolute path, reconstruct the
			// path that we should hand back for the match
			var resultName string
			if filepath.IsAbs(req.preservedGlobs[i]) {
				resultName = filepath.Join(req.rootPrefix, globbed)
			} else {
				relResult := rel
				if req.Directory != req.Root {
					relResult, err = convertToRelSubdirectory(req.Directory, globbed)
					if err != nil {
						return errorResponse("copier: stat: error making %q relative to %q: %v", globbed, req.Directory, err)
					}
				}
				resultName = relResult
			}
			result := StatForItem{Name: resultName}
			s.Globbed = append(s.Globbed, resultName)
			s.Results[resultName] = &result
			// lstat the matched value
			linfo, err := os.Lstat(globbed)
			if err != nil {
				result.Error = err.Error()
				continue
			}
			result.Size = linfo.Size()
			result.Mode = linfo.Mode()
			result.ModTime = linfo.ModTime()
			result.IsDir = linfo.IsDir()
			result.IsRegular = result.Mode.IsRegular()
			result.IsSymlink = (linfo.Mode() & os.ModeType) == os.ModeSymlink
			checkForArchive := req.StatOptions.CheckForArchives
			if result.IsSymlink {
				// if the match was a symbolic link, read it
				immediateTarget, err := os.Readlink(globbed)
				if err != nil {
					result.Error = err.Error()
					continue
				}
				// record where it points, both by itself (it
				// could be a relative link) and in the context
				// of the chroot
				result.ImmediateTarget = immediateTarget
				resolvedTarget, err := resolvePath(req.Root, globbed, pm)
				if err != nil {
					return errorResponse("copier: stat: error resolving %q: %v", globbed, err)
				}
				// lstat the thing that we point to
				info, err := os.Lstat(resolvedTarget)
				if err != nil {
					result.Error = err.Error()
					continue
				}
				// replace IsArchive/IsDir/IsRegular with info about the target
				if info.Mode().IsRegular() && req.StatOptions.CheckForArchives {
					result.IsArchive = isArchivePath(resolvedTarget)
					checkForArchive = false
				}
				result.IsDir = info.IsDir()
				result.IsRegular = info.Mode().IsRegular()
			}
			if result.IsRegular && checkForArchive {
				// we were asked to check on this, and it
				// wasn't a symlink, in which case we'd have
				// already checked what the link points to
				result.IsArchive = isArchivePath(globbed)
			}
		}
		// no unskipped matches -> error
		if len(s.Globbed) == 0 {
			s.Globbed = nil
			s.Results = nil
			s.Error = fmt.Sprintf("copier: stat: %q: %v", glob, syscall.ENOENT)
		}
	}
	return &response{Stat: statResponse{Globs: stats}}
}

func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMatcher, idMappings *idtools.IDMappings) (*response, func() error, error) {
	statRequest := req
	statRequest.Request = requestStat
	statResponse := copierHandlerStat(req, pm)
	errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
		return &response{Error: fmt.Sprintf(fmtspec, args...), Stat: statResponse.Stat, Get: getResponse{}}, nil, nil
	}
	if statResponse.Error != "" {
		return errorResponse("%s", statResponse.Error)
	}
	if len(req.Globs) == 0 {
		return errorResponse("copier: get: expected at least one glob pattern, got 0")
	}
	// build a queue of items by globbing
	var queue []string
	globMatchedCount := 0
	for _, glob := range req.Globs {
		globMatched, err := filepath.Glob(glob)
		if err != nil {
			return errorResponse("copier: get: glob %q: %v", glob, err)
		}
		globMatchedCount += len(globMatched)
		queue = append(queue, globMatched...)
	}
	// no matches -> error
	if len(queue) == 0 {
		return errorResponse("copier: get: globs %v matched nothing (%d filtered out): %v", req.Globs, globMatchedCount, syscall.ENOENT)
	}
	cb := func() error {
		tw := tar.NewWriter(bulkWriter)
		defer tw.Close()
		hardlinkChecker := new(util.HardlinkChecker)
		itemsCopied := 0
		for i, item := range queue {
			// if we're not discarding the names of individual directories, keep track of this one
			relNamePrefix := ""
			if req.GetOptions.KeepDirectoryNames {
				relNamePrefix = filepath.Base(item)
			}
			// if the named thing-to-read is a symlink, dereference it
			info, err := os.Lstat(item)
			if err != nil {
				return errors.Wrapf(err, "copier: get: lstat %q", item)
			}
			// chase links. if we hit a dead end, we should just fail
			followedLinks := 0
			const maxFollowedLinks = 16
			for info.Mode()&os.ModeType == os.ModeSymlink && followedLinks < maxFollowedLinks {
				path, err := os.Readlink(item)
				if err != nil {
					return errors.Wrapf(err, "copier: get: readlink %q(%q)", queue[i], item)
				}
				if filepath.IsAbs(path) || looksLikeAbs(path) {
					path = filepath.Join(req.Root, path)
				} else {
					path = filepath.Join(filepath.Dir(item), path)
				}
				item = path
				if _, err = convertToRelSubdirectory(req.Root, item); err != nil {
					return errors.Wrapf(err, "copier: get: computing path of %q(%q) relative to %q", queue[i], item, req.Root)
				}
				if info, err = os.Lstat(item); err != nil {
					return errors.Wrapf(err, "copier: get: lstat %q(%q)", queue[i], item)
				}
				followedLinks++
			}
			if followedLinks >= maxFollowedLinks {
				return errors.Wrapf(syscall.ELOOP, "copier: get: resolving symlink %q(%q)", queue[i], item)
			}
			// evaluate excludes relative to the root directory
			if info.Mode().IsDir() {
				// we don't expand any of the contents that are archives
				options := req.GetOptions
				options.ExpandArchives = false
				walkfn := func(path string, info os.FileInfo, err error) error {
					if err != nil {
						return errors.Wrapf(err, "copier: get: error reading %q", path)
					}
					// compute the path of this item
					// relative to the top-level directory,
					// for the tar header
					rel, relErr := convertToRelSubdirectory(item, path)
					if relErr != nil {
						return errors.Wrapf(relErr, "copier: get: error computing path of %q relative to top directory %q", path, item)
					}
					// prefix the original item's name if we're keeping it
					if relNamePrefix != "" {
						rel = filepath.Join(relNamePrefix, rel)
					}
					if rel == "" || rel == "." {
						// skip the "." entry
						return nil
					}
					_, skip, err := pathIsExcluded(req.Root, path, pm)
					if err != nil {
						return err
					}
					if skip {
						// don't use filepath.SkipDir
						// here, since a more specific
						// but-include-this for
						// something under it might
						// also be in the excludes list
						return nil
					}
					// if it's a symlink, read its target
					symlinkTarget := ""
					if info.Mode()&os.ModeType == os.ModeSymlink {
						target, err := os.Readlink(path)
						if err != nil {
							return errors.Wrapf(err, "copier: get: readlink(%q(%q))", rel, path)
						}
						symlinkTarget = target
					}
					// add the item to the outgoing tar stream
					return copierHandlerGetOne(info, symlinkTarget, rel, path, options, tw, hardlinkChecker, idMappings)
				}
				// walk the directory tree, checking/adding items individually
				if err := filepath.Walk(item, walkfn); err != nil {
					return errors.Wrapf(err, "copier: get: %q(%q)", queue[i], item)
				}
				itemsCopied++
			} else {
				_, skip, err := pathIsExcluded(req.Root, item, pm)
				if err != nil {
					return err
				}
				if skip {
					continue
				}
				// add the item to the outgoing tar stream.  in
				// cases where this was a symlink that we
				// dereferenced, be sure to use the name of the
				// link.
				if err := copierHandlerGetOne(info, "", filepath.Base(queue[i]), item, req.GetOptions, tw, hardlinkChecker, idMappings); err != nil {
					return errors.Wrapf(err, "copier: get: %q", queue[i])
				}
				itemsCopied++
			}
		}
		if itemsCopied == 0 {
			return errors.Wrapf(syscall.ENOENT, "copier: get: copied no items")
		}
		return nil
	}
	return &response{Stat: statResponse.Stat, Get: getResponse{}}, cb, nil
}

func handleRename(rename map[string]string, name string) string {
	if rename == nil {
		return name
	}
	// header names always use '/', so use path instead of filepath to manipulate it
	if directMapping, ok := rename[name]; ok {
		return directMapping
	}
	prefix, remainder := path.Split(name)
	for prefix != "" {
		if mappedPrefix, ok := rename[prefix]; ok {
			return path.Join(mappedPrefix, remainder)
		}
		if prefix[len(prefix)-1] == '/' {
			if mappedPrefix, ok := rename[prefix[:len(prefix)-1]]; ok {
				return path.Join(mappedPrefix, remainder)
			}
		}
		newPrefix, middlePart := path.Split(prefix)
		if newPrefix == prefix {
			return name
		}
		prefix = newPrefix
		remainder = path.Join(middlePart, remainder)
	}
	return name
}
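
// For illustration (a hypothetical rename map; header names are always
// '/'-separated):
//
//	rename := map[string]string{"olddir": "newdir"}
//	handleRename(rename, "olddir")          == "newdir"
//	handleRename(rename, "olddir/file.txt") == "newdir/file.txt"
//	handleRename(rename, "other/file.txt")  == "other/file.txt"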

func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *util.HardlinkChecker, idMappings *idtools.IDMappings) error {
	// build the header using the name provided
	hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget)
	if err != nil {
		return errors.Wrapf(err, "error generating tar header for %s (%s)", contentPath, symlinkTarget)
	}
	if name != "" {
		hdr.Name = filepath.ToSlash(name)
	}
	if options.Rename != nil {
		hdr.Name = handleRename(options.Rename, hdr.Name)
	}
	if options.StripSetuidBit {
		hdr.Mode &^= cISUID
	}
	if options.StripSetgidBit {
		hdr.Mode &^= cISGID
	}
	if options.StripStickyBit {
		hdr.Mode &^= cISVTX
	}
	// read extended attributes
	var xattrs map[string]string
	if !options.StripXattrs {
		xattrs, err = Lgetxattrs(contentPath)
		if err != nil {
			return errors.Wrapf(err, "error getting extended attributes for %q", contentPath)
		}
	}
	hdr.Xattrs = xattrs // nolint:staticcheck
	if hdr.Typeflag == tar.TypeReg {
		// if it's an archive and we're extracting archives, read the
		// file and spool out its contents in-line.  (if we just
		// inlined the whole file, we'd also be inlining the EOF marker
		// it contains)
		if options.ExpandArchives && isArchivePath(contentPath) {
			f, err := os.Open(contentPath)
			if err != nil {
				return errors.Wrapf(err, "error opening %s", contentPath)
			}
			defer f.Close()
			rc, _, err := compression.AutoDecompress(f)
			if err != nil {
				return errors.Wrapf(err, "error decompressing %s", contentPath)
			}
			defer rc.Close()
			tr := tar.NewReader(rc)
			hdr, err := tr.Next()
			for err == nil {
				if options.Rename != nil {
					hdr.Name = handleRename(options.Rename, hdr.Name)
				}
				if err = tw.WriteHeader(hdr); err != nil {
					return errors.Wrapf(err, "error writing tar header from %q to pipe", contentPath)
				}
				if hdr.Size != 0 {
					n, err := io.Copy(tw, tr)
					if err != nil {
						return errors.Wrapf(err, "error extracting content from archive %s: %s", contentPath, hdr.Name)
					}
					if n != hdr.Size {
						return errors.Errorf("error extracting contents of archive %s: incorrect length for %q", contentPath, hdr.Name)
					}
					tw.Flush()
				}
				hdr, err = tr.Next()
			}
			if err != io.EOF {
				return errors.Wrapf(err, "error extracting contents of archive %s", contentPath)
			}
			return nil
		}
		// if this regular file is hard linked to something else we've
		// already added, set up to output a TypeLink entry instead of
		// a TypeReg entry
		target := hardlinkChecker.Check(srcfi)
		if target != "" {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = filepath.ToSlash(target)
			hdr.Size = 0
		} else {
			// note the device/inode pair for this file
			hardlinkChecker.Add(srcfi, name)
		}
	}
	// map the ownership for the archive
	if idMappings != nil && !idMappings.Empty() {
		hostPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
		hdr.Uid, hdr.Gid, err = idMappings.ToContainer(hostPair)
		if err != nil {
			return errors.Wrapf(err, "error mapping host filesystem owners %#v to container filesystem owners", hostPair)
		}
	}
	// force ownership and/or permissions, if requested
	if hdr.Typeflag == tar.TypeDir {
		if options.ChownDirs != nil {
			hdr.Uid, hdr.Gid = options.ChownDirs.UID, options.ChownDirs.GID
		}
		if options.ChmodDirs != nil {
			hdr.Mode = int64(*options.ChmodDirs)
		}
	} else {
		if options.ChownFiles != nil {
			hdr.Uid, hdr.Gid = options.ChownFiles.UID, options.ChownFiles.GID
		}
		if options.ChmodFiles != nil {
			hdr.Mode = int64(*options.ChmodFiles)
		}
	}
	// output the header
	if err = tw.WriteHeader(hdr); err != nil {
		return errors.Wrapf(err, "error writing header for %s (%s)", contentPath, hdr.Name)
	}
	if hdr.Typeflag == tar.TypeReg {
		// output the content
		f, err := os.Open(contentPath)
		if err != nil {
			return errors.Wrapf(err, "error opening %s", contentPath)
		}
		defer f.Close()
		n, err := io.Copy(tw, f)
		if err != nil {
			return errors.Wrapf(err, "error copying %s", contentPath)
		}
		if n != hdr.Size {
			return errors.Errorf("error copying %s: incorrect size (expected %d bytes, read %d bytes)", contentPath, n, hdr.Size)
		}
		tw.Flush()
	}
	return nil
}

func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
	errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
		return &response{Error: fmt.Sprintf(fmtspec, args...), Put: putResponse{}}, nil, nil
	}
	dirUID, dirGID, defaultDirUID, defaultDirGID := 0, 0, 0, 0
	if req.PutOptions.ChownDirs != nil {
		dirUID, dirGID = req.PutOptions.ChownDirs.UID, req.PutOptions.ChownDirs.GID
		defaultDirUID, defaultDirGID = dirUID, dirGID
	}
	defaultDirMode := os.FileMode(0755)
	if req.PutOptions.ChmodDirs != nil {
		defaultDirMode = *req.PutOptions.ChmodDirs
	}
	if req.PutOptions.DefaultDirOwner != nil {
		defaultDirUID, defaultDirGID = req.PutOptions.DefaultDirOwner.UID, req.PutOptions.DefaultDirOwner.GID
	}
	if req.PutOptions.DefaultDirMode != nil {
		defaultDirMode = *req.PutOptions.DefaultDirMode
	}
	var fileUID, fileGID *int
	if req.PutOptions.ChownFiles != nil {
		fileUID, fileGID = &req.PutOptions.ChownFiles.UID, &req.PutOptions.ChownFiles.GID
	}
	if idMappings != nil && !idMappings.Empty() {
		containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
		hostDirPair, err := idMappings.ToHost(containerDirPair)
		if err != nil {
			return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
		}
		dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
		defaultDirUID, defaultDirGID = hostDirPair.UID, hostDirPair.GID
		if req.PutOptions.ChownFiles != nil {
			containerFilePair := idtools.IDPair{UID: *fileUID, GID: *fileGID}
			hostFilePair, err := idMappings.ToHost(containerFilePair)
			if err != nil {
				return errorResponse("copier: put: error mapping container filesystem owner %d:%d to host filesystem owners: %v", fileUID, fileGID, err)
			}
			fileUID, fileGID = &hostFilePair.UID, &hostFilePair.GID
		}
	}
	ensureDirectoryUnderRoot := func(directory string) error {
		rel, err := convertToRelSubdirectory(req.Root, directory)
		if err != nil {
			return errors.Wrapf(err, "%q is not a subdirectory of %q", directory, req.Root)
		}
		subdir := ""
		for _, component := range strings.Split(rel, string(os.PathSeparator)) {
			subdir = filepath.Join(subdir, component)
			path := filepath.Join(req.Root, subdir)
			if err := os.Mkdir(path, 0700); err == nil {
				if err = lchown(path, defaultDirUID, defaultDirGID); err != nil {
					return errors.Wrapf(err, "copier: put: error setting owner of %q to %d:%d", path, defaultDirUID, defaultDirGID)
				}
				if err = os.Chmod(path, defaultDirMode); err != nil {
					return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, defaultDirMode)
				}
			} else {
				if !os.IsExist(err) {
					return errors.Wrapf(err, "copier: put: error checking directory %q", path)
				}
			}
		}
		return nil
	}
	createFile := func(path string, tr *tar.Reader) (int64, error) {
		f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
		if err != nil && os.IsExist(err) {
			if req.PutOptions.NoOverwriteDirNonDir {
				if st, err2 := os.Lstat(path); err2 == nil && st.IsDir() {
					return 0, errors.Wrapf(err, "copier: put: error creating file at %q", path)
				}
			}
			if err = os.RemoveAll(path); err != nil {
				return 0, errors.Wrapf(err, "copier: put: error removing item to be overwritten %q", path)
			}
			f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_EXCL, 0600)
		}
		if err != nil {
			return 0, errors.Wrapf(err, "copier: put: error opening file %q for writing", path)
		}
		defer f.Close()
		n, err := io.Copy(f, tr)
		if err != nil {
			return n, errors.Wrapf(err, "copier: put: error writing file %q", path)
		}
		return n, nil
	}
	targetDirectory, err := resolvePath(req.Root, req.Directory, nil)
	if err != nil {
		return errorResponse("copier: put: error resolving %q: %v", req.Directory, err)
	}
	info, err := os.Lstat(targetDirectory)
	if err == nil {
		if !info.IsDir() {
			return errorResponse("copier: put: %s (%s): exists but is not a directory", req.Directory, targetDirectory)
		}
	} else {
		if !os.IsNotExist(err) {
			return errorResponse("copier: put: %s: %v", req.Directory, err)
		}
		if err := ensureDirectoryUnderRoot(req.Directory); err != nil {
			return errorResponse("copier: put: %v", err)
		}
	}
	cb := func() error {
		type directoryAndTimes struct {
			directory    string
			atime, mtime time.Time
		}
		var directoriesAndTimes []directoryAndTimes
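		// restore the timestamps of directories that we got entries for, in
		// reverse order, after everything else has been written, since
		// creating an item in a directory updates the directory's mtime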
		defer func() {
			for i := range directoriesAndTimes {
				dt := directoriesAndTimes[len(directoriesAndTimes)-i-1]
				if err := lutimes(false, dt.directory, dt.atime, dt.mtime); err != nil {
					logrus.Debugf("error setting access and modify timestamps on %q to %s and %s: %v", dt.directory, dt.atime, dt.mtime, err)
				}
			}
		}()
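		// ignoredItems tracks entries that we've skipped, so that hard links
		// to them can be skipped as well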
		ignoredItems := make(map[string]struct{})
		tr := tar.NewReader(bulkReader)
		hdr, err := tr.Next()
		for err == nil {
			nameBeforeRenaming := hdr.Name
			if len(hdr.Name) == 0 {
				// no name -> ignore the entry
				ignoredItems[nameBeforeRenaming] = struct{}{}
				hdr, err = tr.Next()
				continue
			}
			if req.PutOptions.Rename != nil {
				hdr.Name = handleRename(req.PutOptions.Rename, hdr.Name)
			}
			// figure out who should own this new item
			if idMappings != nil && !idMappings.Empty() {
				containerPair := idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
				hostPair, err := idMappings.ToHost(containerPair)
				if err != nil {
					return errors.Wrapf(err, "error mapping container filesystem owner 0,0 to host filesystem owners")
				}
				hdr.Uid, hdr.Gid = hostPair.UID, hostPair.GID
			}
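			// if ChownDirs or ChownFiles was specified, override the ownership
			// that was recorded in the archive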
			if hdr.Typeflag == tar.TypeDir {
				if req.PutOptions.ChownDirs != nil {
					hdr.Uid, hdr.Gid = dirUID, dirGID
				}
			} else {
				if req.PutOptions.ChownFiles != nil {
					hdr.Uid, hdr.Gid = *fileUID, *fileGID
				}
			}
			// make sure the parent directory exists, including for tar.TypeXGlobalHeader entries
			// that we otherwise ignore, because that's what docker build does with them
			path := filepath.Join(targetDirectory, cleanerReldirectory(filepath.FromSlash(hdr.Name)))
			if err := ensureDirectoryUnderRoot(filepath.Dir(path)); err != nil {
				return err
			}
			// figure out what the permissions should be
			if hdr.Typeflag == tar.TypeDir {
				if req.PutOptions.ChmodDirs != nil {
					hdr.Mode = int64(*req.PutOptions.ChmodDirs)
				}
			} else {
				if req.PutOptions.ChmodFiles != nil {
					hdr.Mode = int64(*req.PutOptions.ChmodFiles)
				}
			}
			// create the new item
			devMajor := uint32(hdr.Devmajor)
			devMinor := uint32(hdr.Devminor)
			mode := os.FileMode(hdr.Mode) & os.ModePerm
			switch hdr.Typeflag {
			// no type flag for sockets
			default:
				return errors.Errorf("unrecognized Typeflag %c", hdr.Typeflag)
			case tar.TypeReg, tar.TypeRegA:
				var written int64
				written, err = createFile(path, tr)
				// only check the length if there wasn't an error; the error itself
				// gets checked below, along with errors for other types of entries
				if err == nil && written != hdr.Size {
					return errors.Errorf("copier: put: error creating %q: incorrect length (%d != %d)", path, written, hdr.Size)
				}
			case tar.TypeLink:
				var linkTarget string
				if _, ignoredTarget := ignoredItems[hdr.Linkname]; ignoredTarget {
					// hard link to an ignored item: skip this, too
					ignoredItems[nameBeforeRenaming] = struct{}{}
					goto nextHeader
				}
				if req.PutOptions.Rename != nil {
					hdr.Linkname = handleRename(req.PutOptions.Rename, hdr.Linkname)
				}
				if linkTarget, err = resolvePath(targetDirectory, filepath.Join(req.Root, filepath.FromSlash(hdr.Linkname)), nil); err != nil {
					return errors.Errorf("error resolving hardlink target path %q under root %q", hdr.Linkname, req.Root)
				}
				if err = os.Link(linkTarget, path); err != nil && os.IsExist(err) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.Remove(path); err == nil {
						err = os.Link(linkTarget, path)
					}
				}
			case tar.TypeSymlink:
				// if req.PutOptions.Rename != nil {
				//	todo: the general solution requires resolving to an absolute path, handling
				//	renaming, and then possibly converting back to a relative symlink
				// }
				if err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path)); err != nil && os.IsExist(err) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.Remove(path); err == nil {
						err = os.Symlink(filepath.FromSlash(hdr.Linkname), filepath.FromSlash(path))
					}
				}
			case tar.TypeChar:
				if req.PutOptions.IgnoreDevices {
					ignoredItems[nameBeforeRenaming] = struct{}{}
					goto nextHeader
				}
				if err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.Remove(path); err == nil {
						err = mknod(path, chrMode(0600), int(mkdev(devMajor, devMinor)))
					}
				}
			case tar.TypeBlock:
				if req.PutOptions.IgnoreDevices {
					ignoredItems[nameBeforeRenaming] = struct{}{}
					goto nextHeader
				}
				if err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor))); err != nil && os.IsExist(err) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.Remove(path); err == nil {
						err = mknod(path, blkMode(0600), int(mkdev(devMajor, devMinor)))
					}
				}
			case tar.TypeDir:
				if err = os.Mkdir(path, 0700); err != nil && os.IsExist(err) {
					var st os.FileInfo
					if st, err = os.Stat(path); err == nil && !st.IsDir() {
						// it's not a directory, so remove it and mkdir
						if err = os.Remove(path); err == nil {
							err = os.Mkdir(path, 0700)
						}
					}
					// either we removed it and retried, or it was a directory,
					// in which case we want to just add the new stuff under it
				}
				// make a note of the directory's times.  we
				// might create items under it, which will
				// cause the mtime to change after we correct
				// it, so we'll need to correct it again later
				directoriesAndTimes = append(directoriesAndTimes, directoryAndTimes{
					directory: path,
					atime:     hdr.AccessTime,
					mtime:     hdr.ModTime,
				})
			case tar.TypeFifo:
				if err = mkfifo(path, 0600); err != nil && os.IsExist(err) {
					if req.PutOptions.NoOverwriteDirNonDir {
						if st, err := os.Lstat(path); err == nil && st.IsDir() {
							break
						}
					}
					if err = os.Remove(path); err == nil {
						err = mkfifo(path, 0600)
					}
				}
			case tar.TypeXGlobalHeader:
				// Per archive/tar, PAX uses these to specify key=value information
				// that applies to all subsequent entries.  The one reported in #2717,
				// https://www.openssl.org/source/openssl-1.1.1g.tar.gz, includes a
				// comment=(40 byte hex string) at the start, possibly a digest.
				// Don't try to create whatever path was used for the header.
				goto nextHeader
			}
			// check for errors
			if err != nil {
				return errors.Wrapf(err, "copier: put: error creating %q", path)
			}
			// restore xattrs
			if !req.PutOptions.StripXattrs {
				if err = Lsetxattrs(path, hdr.Xattrs); err != nil { // nolint:staticcheck
					if !req.PutOptions.IgnoreXattrErrors {
						return errors.Wrapf(err, "copier: put: error setting extended attributes on %q", path)
					}
				}
			}
			// set ownership
			if err = lchown(path, hdr.Uid, hdr.Gid); err != nil {
				return errors.Wrapf(err, "copier: put: error setting ownership of %q to %d:%d", path, hdr.Uid, hdr.Gid)
			}
			// set permissions, except for symlinks, since we don't have lchmod
			if hdr.Typeflag != tar.TypeSymlink {
				if err = os.Chmod(path, mode); err != nil {
					return errors.Wrapf(err, "copier: put: error setting permissions on %q to 0%o", path, mode)
				}
			}
			// set other bits that might have been reset by chown()
			if hdr.Typeflag != tar.TypeSymlink {
				if hdr.Mode&cISUID == cISUID {
					mode |= syscall.S_ISUID
				}
				if hdr.Mode&cISGID == cISGID {
					mode |= syscall.S_ISGID
				}
				if hdr.Mode&cISVTX == cISVTX {
					mode |= syscall.S_ISVTX
				}
				if err = syscall.Chmod(path, uint32(mode)); err != nil {
					return errors.Wrapf(err, "error setting additional permissions on %q to 0%o", path, mode)
				}
			}
			// set time
			if hdr.AccessTime.IsZero() || hdr.AccessTime.Before(hdr.ModTime) {
				hdr.AccessTime = hdr.ModTime
			}
			if err = lutimes(hdr.Typeflag == tar.TypeSymlink, path, hdr.AccessTime, hdr.ModTime); err != nil {
				return errors.Wrapf(err, "error setting access and modify timestamps on %q to %s and %s", path, hdr.AccessTime, hdr.ModTime)
			}
		nextHeader:
			hdr, err = tr.Next()
		}
		if err != io.EOF {
			return errors.Wrapf(err, "error reading tar stream: expected EOF")
		}
		return nil
	}
	return &response{Error: "", Put: putResponse{}}, cb, nil
}

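// copierHandlerMkdir creates the directory named by a mkdir request, along
// with any missing parents, applying the requested ownership and permissions
// to the components that it creates.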
func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) {
	errorResponse := func(fmtspec string, args ...interface{}) (*response, func() error, error) {
		return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil
	}
	dirUID, dirGID := 0, 0
	if req.MkdirOptions.ChownNew != nil {
		dirUID, dirGID = req.MkdirOptions.ChownNew.UID, req.MkdirOptions.ChownNew.GID
	}
	dirMode := os.FileMode(0755)
	if req.MkdirOptions.ChmodNew != nil {
		dirMode = *req.MkdirOptions.ChmodNew
	}
	if idMappings != nil && !idMappings.Empty() {
		containerDirPair := idtools.IDPair{UID: dirUID, GID: dirGID}
		hostDirPair, err := idMappings.ToHost(containerDirPair)
		if err != nil {
			return errorResponse("copier: mkdir: error mapping container filesystem owner %d:%d to host filesystem owners: %v", dirUID, dirGID, err)
		}
		dirUID, dirGID = hostDirPair.UID, hostDirPair.GID
	}

	directory, err := resolvePath(req.Root, req.Directory, nil)
	if err != nil {
		return errorResponse("copier: mkdir: error resolving %q: %v", req.Directory, err)
	}

	rel, err := convertToRelSubdirectory(req.Root, directory)
	if err != nil {
		return errorResponse("copier: mkdir: error computing path of %q relative to %q: %v", directory, req.Root, err)
	}

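	// create each missing component of the path in turn, applying the
	// requested ownership and permissions to the ones we create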
	subdir := ""
	for _, component := range strings.Split(rel, string(os.PathSeparator)) {
		subdir = filepath.Join(subdir, component)
		path := filepath.Join(req.Root, subdir)
		if err := os.Mkdir(path, 0700); err == nil {
			if err = chown(path, dirUID, dirGID); err != nil {
				return errorResponse("copier: mkdir: error setting owner of %q to %d:%d: %v", path, dirUID, dirGID, err)
			}
			if err = chmod(path, dirMode); err != nil {
				return errorResponse("copier: mkdir: error setting permissions on %q to 0%o: %v", path, dirMode)
			}
		} else {
			if !os.IsExist(err) {
				return errorResponse("copier: mkdir: error checking directory %q: %v", path, err)
			}
		}
	}

	return &response{Error: "", Mkdir: mkdirResponse{}}, nil, nil
}