File: fileitem.cc


//#define LOCAL_DEBUG
#include "debug.h"

#include "config.h"
#include "fileitem.h"
#include "header.h"
#include "acfg.h"
#include "acbuf.h"
#include "fileio.h"
#include "cleaner.h"
#include "filelocks.h"

#include <errno.h>
#include <algorithm>

#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>

using namespace std;

namespace acng
{
#define MAXTEMPDELAY acng::cfg::maxtempdelay // 27
mstring ACNG_API sReplDir("_altStore" SZPATHSEP);

static tFiGlobMap mapItems;
#ifndef MINIBUILD
static acmutex mapItemsMx;
#endif

header const & fileitem::GetHeaderUnlocked()
{
	return m_head;
}

string fileitem::GetHttpMsg()
{
	setLockGuard;
	if(m_head.frontLine.length()>9)
		return m_head.frontLine.substr(9);
	return m_head.frontLine;
}

fileitem::fileitem() :
			m_nIncommingCount(0),
			m_nSizeSeen(0),
			m_nRangeLimit(-1),
			m_bCheckFreshness(true),
			m_bHeadOnly(false),
			m_bAllowStoreData(true),
			m_nSizeChecked(0),
			m_filefd(-1),
			m_nDlRefsCount(0),
			usercount(0),
			m_status(FIST_FRESH),
			m_nTimeDlStarted(0),
			m_nTimeDlDone(END_OF_TIME),
			m_globRef(mapItems.end())
{
}

fileitem::~fileitem()
{
	//setLockGuard;
	//	m_head.clear();
	Truncate2checkedSize();
	checkforceclose(m_filefd);
}

void fileitem::IncDlRefCount()
{
	setLockGuard;
	m_nDlRefsCount++;
}

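// Drop one downloader reference. When the last downloader detaches while the
// transfer is still unfinished, the item is flagged as a download error and the
// caller's reason string becomes the HTTP status line; in any case the file is
// truncated to the verified size and the descriptor is closed.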
void fileitem::DecDlRefCount(const string &sReason)
{
	setLockGuard;

	notifyAll();

	m_nDlRefsCount--;
	if(m_nDlRefsCount>0)
		return; // someone will care...

	// ... otherwise: the last downloader disappeared, so the observers need to be told

	if (m_status<FIST_COMPLETE)
	{
		m_status=FIST_DLERROR;
		m_head.clear();
		m_head.frontLine=string("HTTP/1.1 ")+sReason;
		m_head.type=header::ANSWER;

		if (cfg::debug&log::LOG_MORE)
			log::misc(string("Download of ")+m_sPathRel+" aborted");
	}
	Truncate2checkedSize();
	checkforceclose(m_filefd);
}

uint64_t fileitem::GetTransferCount()
{
	setLockGuard;
	uint64_t ret=m_nIncommingCount;
	m_nIncommingCount=0;
	return ret;
}

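// Open the cached file read-only for delivery to a client, advising the kernel
// about sequential access where fadvise support is available.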
int fileitem::GetFileFd() {
	LOGSTART("fileitem::GetFileFd");
	setLockGuard;

	ldbg("Opening " << m_sPathRel);
	int fd=open(SZABSPATH(m_sPathRel), O_RDONLY);

#ifdef HAVE_FADVISE
	// optional, experimental
	if(fd>=0)
		posix_fadvise(fd, 0, m_nSizeChecked, POSIX_FADV_SEQUENTIAL);
#endif

	return fd;
}

off_t GetFileSize(cmstring & path, off_t defret)
{
	struct stat stbuf;
	return (0==::stat(path.c_str(), &stbuf)) ? stbuf.st_size : defret;
}

void fileitem::ResetCacheState()
{
	setLockGuard;
	m_nSizeSeen = 0;
	m_nSizeChecked = 0;
	m_status = FIST_FRESH;
	m_bAllowStoreData = true;
	m_head.clear();
}

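// Evaluate the cached .head file and the data file on disk to derive the initial
// item state. Volatile (freshness-checked) files must carry a Last-Modified
// header, and their on-disk size is ignored when range operations are disabled
// and it does not match the recorded Content-Length. Non-volatile files are
// accepted up to the recorded length and marked complete when the sizes agree.
// Anything implausible removes the .head file so the item is downloaded from
// scratch.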
fileitem::FiStatus fileitem::Setup(bool bCheckFreshness)
{
	LOGSTART2("fileitem::Setup", bCheckFreshness);

	setLockGuard;

	if(m_status>FIST_FRESH)
		return m_status;

	m_status=FIST_INITED;
	m_bCheckFreshness = bCheckFreshness;

	cmstring sPathAbs(CACHE_BASE+m_sPathRel);

	if(m_head.LoadFromFile(sPathAbs+".head") >0 && m_head.type==header::ANSWER )
	{
		if(200 != m_head.getStatus())
			goto error_clean;


		LOG("good head");

		m_nSizeSeen=GetFileSize(sPathAbs, 0);

		// some plausibility checks
		if(m_bCheckFreshness)
		{
			const char *p=m_head.h[header::LAST_MODIFIED];
			if(!p)
				goto error_clean; // suspicious, cannot use it
			LOG("check freshness, last modified: " << p );

			// that will trigger a later check via If-Modified-Since only, so we need to be sure about the size here
			if(cfg::vrangeops == 0
					&& m_nSizeSeen != atoofft(m_head.h[header::CONTENT_LENGTH], -17))
			{
				m_nSizeSeen = 0;
			}
		}
		else
		{
			// non-volatile file, so the length could be accepted; run some checks first
			const char *pContLen=m_head.h[header::CONTENT_LENGTH];
			if(pContLen)
			{
				off_t nContLen=atoofft(pContLen); // if it's 0 then we assume it's 0

				// file larger than it could ever be?
				if(nContLen < m_nSizeSeen)
					goto error_clean;

				LOG("Content-Length has a sane range");

				m_nSizeChecked=m_nSizeSeen;

				// is it complete? and 0 value also looks weird, try to verify later
				if(m_nSizeSeen == nContLen && nContLen>0)
					m_status=FIST_COMPLETE;
			}
			else
			{
				// no content length known, assume it's ok
				m_nSizeChecked=m_nSizeSeen;
			}
		}
	}
	else // -> no .head file
	{
		// maybe there is some left-over without head file?
		// Don't trust volatile data, but otherwise try to reuse it?
		if(!bCheckFreshness)
			m_nSizeSeen=GetFileSize(sPathAbs, 0);
	}
	LOG("resulting status: " << (int) m_status);
	return m_status;

	error_clean:
	::unlink((sPathAbs+".head").c_str());
	m_head.clear();
	m_nSizeSeen=0;
	m_status=FIST_INITED;
	return m_status; // unusable, to be redownloaded
}

bool fileitem::CheckUsableRange_unlocked(off_t nRangeLastByte)
{
	if(m_status == FIST_COMPLETE)
		return true;
	if(m_status < FIST_INITED || m_status > FIST_COMPLETE)
		return false;
	if(m_status >= FIST_DLGOTHEAD)
		return nRangeLastByte > m_nSizeChecked;

	// special exceptions for static files
	return (m_status == FIST_INITED && !m_bCheckFreshness
			&& m_nSizeSeen>0 && nRangeLastByte >=0 && nRangeLastByte <m_nSizeSeen
			&& atoofft(m_head.h[header::CONTENT_LENGTH], -255) > nRangeLastByte);
}

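// Reset the on-disk representation for a retry: truncate the data file, strip
// size/type/date related fields from the stored header and clear the in-memory
// counters. Returns false when the data file cannot be shrunk, which is
// considered too risky to continue with.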
bool fileitem::SetupClean(bool bForce)
{
	setLockGuard;

	if(bForce)
	{
		if(m_status>FIST_FRESH)
		{
			m_status = FIST_DLERROR;
			m_head.frontLine="HTTP/1.1 500 FIXME, DEAD ITEM";
		}
	}
	else
	{
		if(m_status>FIST_FRESH)
			return false;
		m_status=FIST_INITED;
	}
	cmstring sPathAbs(SABSPATH(m_sPathRel));
	cmstring sPathHead(sPathAbs+".head");
	// header allowed to be lost in process...
	//	if(unlink(sPathHead.c_str()))
	//		::ignore_value(::truncate(sPathHead.c_str(), 0));
	acng::ignore_value(::truncate(sPathAbs.c_str(), 0));
	Cstat stf(sPathAbs);
	if(stf && stf.st_size>0)
		return false; // didn't work. Permissions? Anyhow, too dangerous to act on this now
	header h;
	h.LoadFromFile(sPathHead);
	h.del(header::CONTENT_LENGTH);
	h.del(header::CONTENT_TYPE);
	h.del(header::LAST_MODIFIED);
	h.del(header::XFORWARDEDFOR);
	h.del(header::CONTENT_RANGE);
	h.StoreToFile(sPathHead);
	//	if(0==stat(sPathHead.c_str(), &stf) && stf.st_size >0)
	//		return false; // that's weird too, header still exists with real size
	m_head.clear();
	m_nSizeSeen=m_nSizeChecked=0;

	return true;
}

void fileitem::SetupComplete()
{
	setLockGuard;
	notifyAll();
	m_nSizeChecked = m_nSizeSeen;
	m_status = FIST_COMPLETE;
}

void fileitem::UpdateHeadTimestamp()
{
	if(m_sPathRel.empty())
		return;
	utimes(SZABSPATH(m_sPathRel + ".head"), nullptr);
}

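// Block the calling thread until the item reaches FIST_COMPLETE or a later
// error/stop state, optionally reporting the resulting HTTP status code.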
fileitem::FiStatus fileitem::WaitForFinish(int *httpCode)
{
	lockuniq g(this);
	while(m_status<FIST_COMPLETE)
		wait(g);
	if(httpCode)
		*httpCode=m_head.getStatus();
	return m_status;
}

inline void _LogWithErrno(const char *msg, const string & sFile)
{
	tErrnoFmter f;
	log::err(tSS() << sFile <<
			" storage error [" << msg << "], last errno: " << f);
}

#ifndef MINIBUILD

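// Called by the downloader when the response header for this item has arrived.
// Depending on the server status the item is (re)initialized: 200 starts or
// restarts a full download, 206 validates the Content-Range against the expected
// resume position, 416 either triggers a clean retry or discards a cache file
// whose size the server disputes, and any other status is recorded as an error
// answer. On success the target file and the .head file are prepared for the
// incoming body data.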
bool fileitem_with_storage::DownloadStartedStoreHeader(const header & h, size_t hDataLen,
		const char *pNextData,
		bool bForcedRestart, bool &bDoCleanRetry)
{
	LOGSTART("fileitem::DownloadStartedStoreHeader");
	auto SETERROR = [&](LPCSTR x) {
		m_bAllowStoreData=false;
		m_head.frontLine=mstring("HTTP/1.1 ")+x;
		m_head.set(header::XORIG, h.h[header::XORIG]);
		m_status=FIST_DLERROR; m_nTimeDlDone=GetTime();
		_LogWithErrno(x, m_sPathRel);
	};

	auto withError = [&](LPCSTR x) {
		SETERROR(x);
		return false;
	};

	setLockGuard;

	USRDBG( "Download started, storeHeader for " << m_sPathRel << ", current status: " << (int) m_status);
	
	if(m_status >= FIST_COMPLETE)
	{
		USRDBG( "Download was completed or aborted, not restarting before expiration");
		return false;
	}

	// conflict with another thread's download attempt? Deny, except for a forced restart
	if (m_status > FIST_DLPENDING && !bForcedRestart)
		return false;

	if(m_bCheckFreshness)
		m_nTimeDlStarted = GetTime();

	m_nIncommingCount+=hDataLen;

	// optional optimization: hints for the filesystem or the kernel
	off_t hint_start(0), hint_length(0);

	// status will change, most likely... i.e. the return-withError path
	notifyAll();

	cmstring sPathAbs(CACHE_BASE+m_sPathRel);
	string sHeadPath=sPathAbs + ".head";

	auto withErrorAndKillFile = [&](LPCSTR x)
			{
		SETERROR(x);
		if(m_filefd>=0)
		{
#if _POSIX_SYNCHRONIZED_IO > 0
			fsync(m_filefd);
#endif
			Truncate2checkedSize();
			forceclose(m_filefd);
		}

		LOG("Deleting " << sPathAbs);
		::unlink(sPathAbs.c_str());
		::unlink(sHeadPath.c_str());

		m_status=FIST_DLERROR;
		m_nTimeDlDone=GetTime();
		return false;
			};

	int serverStatus = h.getStatus();
#if 0
#warning FIXME
	static UINT fc=1;
	if(!(++fc % 4))
	{
		serverStatus = 416;
	}
#endif
	switch(serverStatus)
	{
	case 200:
	{
		if(m_status < FIST_DLGOTHEAD)
			bForcedRestart = false; // behave normally, set all data

		if (bForcedRestart)
		{
			if (m_nSizeChecked != 0)
			{
				/* shouldn't be here, server should have resumed at the previous position.
				 * Most likely the remote file was modified after the download started.
				 */
				//USRDBG( "state: " << m_status << ", m_nsc: " << m_nSizeChecked);
				return withError("500 Failed to resume remote download");
			}
			if(h.h[header::CONTENT_LENGTH] && atoofft(h.h[header::CONTENT_LENGTH])
					!= atoofft(h.h[header::CONTENT_LENGTH], -2))
			{
				return withError("500 Failed to resume remote download, bad length");
			}
			m_head.set(header::XORIG, h.h[header::XORIG]);
		}
		else
		{
			m_nSizeChecked=0;
			m_head=h;
		}
		hint_length=atoofft(h.h[header::CONTENT_LENGTH], 0);
		break;
	}
	case 206:
	{

		if(m_nSizeSeen<=0 && m_nRangeLimit<0)
		{
			// unexpected: we cannot have requested partial content here
			return withError("500 Unexpected Partial Response");
		}
		/*
		 * Range: bytes=453291-
		 * ...
		 * Content-Length: 7271829
		 * Content-Range: bytes 453291-7725119/7725120
		 */
		const char *p=h.h[header::CONTENT_RANGE];
		if(!p)
			return withError("500 Missing Content-Range in Partial Response");
		off_t myfrom, myto, mylen;
		int n=sscanf(p, "bytes " OFF_T_FMT "-" OFF_T_FMT "/" OFF_T_FMT, &myfrom, &myto, &mylen);
		if(n<=0)
			n=sscanf(p, "bytes=" OFF_T_FMT "-" OFF_T_FMT "/" OFF_T_FMT, &myfrom, &myto, &mylen);

		ldbg("resuming? n: " << n << " and myfrom: " << myfrom <<
				" and myto: " << myto << " and mylen: " << mylen);
		if(n!=3  // check for nonsense
				|| (m_nSizeSeen>0 && myfrom != m_nSizeSeen-1)
				|| (m_nRangeLimit>=0 && myto != m_nRangeLimit)
				|| myfrom<0 || mylen<0
		)
		{
			return withError("500 Server reports unexpected range");
		}

		m_nSizeChecked=myfrom;

		hint_start=myfrom;
		hint_length=mylen;

		m_head=h;
		m_head.frontLine="HTTP/1.1 200 OK";
		m_head.del(header::CONTENT_RANGE);
		m_head.set(header::CONTENT_LENGTH, mylen);
		m_head.set(header::XORIG, h.h[header::XORIG]);

		// target opened before? close it, will reopen&seek later
		if (bForcedRestart)
		{
			Truncate2checkedSize();
			checkforceclose(m_filefd);
		}

		// special optimization; if "-1 trick" was used then maybe don't reopen that file for writing later
		if(m_bCheckFreshness && pNextData && m_nSizeSeen == mylen && m_nSizeChecked == mylen-1)
		{
			int fd=open(sPathAbs.c_str(), O_RDONLY);
			if(fd>=0)
			{
				if(m_nSizeChecked==lseek(fd, m_nSizeChecked, SEEK_SET))
				{
					char c;
					if(1 == read(fd, &c, 1) && c == *pNextData)
					{
						if(cfg::debug & log::LOG_DEBUG)
							log::err(tSS() << "known data hit, don't write to: "<< m_sPathRel);
						m_bAllowStoreData=false;
						m_nSizeChecked=mylen;
					}
				}
				// XXX: optimize that, open as RW if possible and keep the file open for writing
				forceclose(fd);
			}
		}
		break;
	}
	case 416:
		// that's bad; it cannot have been completed before (the -1 trick)
		// however, proxy servers with an overly clever caching strategy can cause this
		// when If-Modified-Since is used and they dislike it, so attempt a retry in this case
		if(m_nSizeChecked == 0)
		{
			USRDBG( "Peer denied to resume previous download (transient error) " << m_sPathRel );
			m_nSizeSeen = 0;
			bDoCleanRetry=true;
			return false;
		}
		else
		{
			// -> kill cached file ASAP
			m_bAllowStoreData=false;
			m_head.copy(h, header::XORIG);
			return withErrorAndKillFile("503 Server disagrees on file size, cleaning up");
		}
		break;
	default:
		m_head.type=header::ANSWER;
		m_head.copy(h, header::XORIG);
		m_head.copy(h, header::LOCATION);
		if(bForcedRestart)
		{
			// got an error from the replacement mirror? cannot handle it properly
			// because some job might already have started returning the data
			USRDBG( "Cannot restart, HTTP code: " << serverStatus);
			return withError(h.getCodeMessage());
		}

		m_bAllowStoreData = false;
		// have a clean header with just the error message
		m_head.frontLine = h.frontLine;
		m_head.set(header::CONTENT_LENGTH, "0");
		if(m_status > FIST_DLGOTHEAD)
		{
			// Bad: a client may already have been served from it. Prevent such trouble in the future.
			unlink(sHeadPath.c_str());
		}
	}

	if(cfg::debug & log::LOG_MORE)
		log::misc(string("Download of ")+m_sPathRel+" started");

	if(m_bAllowStoreData)
	{
		// using adaptive Delete-Or-Replace-Or-CopyOnWrite strategy

		MoveRelease2Sidestore();

		// First open the file to be sure that it can be written. Header storage is the critical point;
		// every error after that leads to a full cleanup so as not to risk inconsistent file contents.

		int flags = O_WRONLY | O_CREAT | O_BINARY;
		struct stat stbuf;

		mkbasedir(sPathAbs);

		m_filefd = open(sPathAbs.c_str(), flags, cfg::fileperms);
		ldbg("file opened?! returned: " << m_filefd);

		// self-recovery from a cache poisoned with files that have wrong permissions
		if (m_filefd < 0)
		{
			if(m_nSizeChecked > 0) // cannot append to the existing file; do what's still possible
			{
				string temp = sPathAbs + ".tmp";
				if(FileCopy(sPathAbs, temp) && 0 == unlink(sPathAbs.c_str()) )
				{
					if(0!=rename(temp.c_str(), sPathAbs.c_str()))
						return withError("503 Cannot rename files");

					// be sure about that
					if(0!=stat(sPathAbs.c_str(), &stbuf) || stbuf.st_size!=m_nSizeSeen)
						return withError("503 Cannot copy file parts, filesystem full?");

					m_filefd=open(sPathAbs.c_str(), flags, cfg::fileperms);
					ldbg("file opened after copying around: ");
				}
				else
					return withError((tSS()<<"503 Cannot store or remove files in "
							<< GetDirPart(sPathAbs)).c_str());
			}
			else
			{
				unlink(sPathAbs.c_str());
				m_filefd=open(sPathAbs.c_str(), flags, cfg::fileperms);
				ldbg("file force-opened?! returned: " << m_filefd);
			}
		}

		if (m_filefd<0)
		{
			tErrnoFmter efmt("503 Cache storage error - ");
#ifdef DEBUG
			return withError((efmt+sPathAbs).c_str());
#else
			return withError(efmt.c_str());
#endif
		}

		if(0 != fstat(m_filefd, &stbuf) || !S_ISREG(stbuf.st_mode))
			return withErrorAndKillFile("503 Not a regular file");

		// this makes sure not to truncate the file while it's mmapped
		auto tempLock = TFileShrinkGuard::Acquire(stbuf);

		// crop, but only if the new size is smaller. MUST NEVER become larger (would fill with zeros)
		if(m_nSizeSeen != 0 && m_nSizeChecked <= m_nSizeSeen)
		{
			if(0==Truncate2checkedSize())
			{
#if ( (_POSIX_C_SOURCE >= 199309L || _XOPEN_SOURCE >= 500) && _POSIX_SYNCHRONIZED_IO > 0)
				fdatasync(m_filefd);
#endif
			}
			else
				return withErrorAndKillFile("503 Cannot change file size");
		}
		else if(m_nSizeChecked>m_nSizeSeen) // should never happen; it is caught by the checks above
			return withErrorAndKillFile("503 Internal error on size checking");
		// else... nothing to fix since the expectation==reality

		//	falloc_helper(m_filefd, hint_start, hint_length);
		if(m_nSizeSeen < (off_t) cfg::allocspace)
			falloc_helper(m_filefd, 0, min(hint_start+hint_length, (off_t)cfg::allocspace));
		/*
		 * double-check the docs, this is probably not relevant for writing
#ifdef HAVE_FADVISE
	// optional, experimental
		posix_fadvise(m_filefd, hint_start, hint_length, POSIX_FADV_SEQUENTIAL);
#endif
		 */
		ldbg("Storing header as " + sHeadPath);
		int count=m_head.StoreToFile(sHeadPath);

		if(count<0)
			return withErrorAndKillFile( (-count != ENOSPC
					? "503 Cache storage error" : "503 OUT OF DISK SPACE"));

		// double-check the sane state
		if(0 != fstat(m_filefd, &stbuf) || stbuf.st_size != m_nSizeChecked)
			return withErrorAndKillFile("503 Inconsistent file state");

		if(m_nSizeChecked != lseek(m_filefd, m_nSizeChecked, SEEK_SET))
			return withErrorAndKillFile("503 IO error, positioning");
	}

	m_status=FIST_DLGOTHEAD;
	return true;
}

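// Append a chunk of body data to the cache file. A zero-sized chunk marks the
// end of the transfer and finalizes the item, adding a Content-Length to the
// stored header if it was missing (e.g. after a chunked transfer). Interrupted
// and short writes are retried; any other write error puts the item into
// FIST_DLERROR.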
bool fileitem_with_storage::StoreFileData(const char *data, unsigned int size)
{
	setLockGuard;

	LOGSTART2("fileitem::StoreFileData", "status: " <<  (int) m_status << ", size: " << size);

	// something might care, most likely... also about BOUNCE action
	notifyAll();

	m_nIncommingCount += size;

	if(m_status > FIST_COMPLETE || m_status < FIST_DLGOTHEAD)
	{
		ldbg("StoreFileData rejected, status: " << (int) m_status)
		return false;
	}

	if (size == 0)
	{
		if(FIST_COMPLETE == m_status)
		{
			LOG("already completed");
		}
		else
		{
			m_status = FIST_COMPLETE;
			m_nTimeDlDone = GetTime();

			if (cfg::debug & log::LOG_MORE)
				log::misc(tSS() << "Download of " << m_sPathRel << " finished");

			// we are done! Fix header from chunked transfers?
			if (m_filefd >= 0 && !m_head.h[header::CONTENT_LENGTH])
			{
				m_head.set(header::CONTENT_LENGTH, m_nSizeChecked);
				m_head.StoreToFile(CACHE_BASE + m_sPathRel + ".head");
			}
		}
	}
	else
	{
		m_status = FIST_DLRECEIVING;

		if (m_bAllowStoreData && m_filefd >= 0)
		{
			while(size > 0)
			{
				int r = write(m_filefd, data, size);
				if(r < 0)
				{
					if(EINTR == errno || EAGAIN == errno)
						continue;
					tErrnoFmter efmt("HTTP/1.1 503 ");
					m_head.frontLine = efmt;
					m_status=FIST_DLERROR;
					// message will be set by the caller
					m_nTimeDlDone = GetTime();
					_LogWithErrno(efmt.c_str(), m_sPathRel);
					return false;
				}
				m_nSizeChecked += r;
				size -= r;
				data += r;
			}
		}
	}

	// needs to remember the good size, just in case the DL is resumed (restarted in hot state)
	if(m_nSizeSeen < m_nSizeChecked)
		m_nSizeSeen = m_nSizeChecked;

	return true;
}

fileItemMgmt::~fileItemMgmt()
{
	LOGSTART("fileItemMgmt::~fileItemMgmt");
	Unreg();
}

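// Detach this management handle from the shared item. When the usage count drops
// to zero, a recently completed volatile item is kept around for a grace period
// (cleanup is scheduled for later); otherwise the item is put into FIST_DLSTOP
// and removed from the global map right away.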
inline void fileItemMgmt::Unreg()
{
	LOGSTART("fileItemMgmt::Unreg");

	if(!m_ptr) // unregistered before?
		return;

	lockguard managementLock(mapItemsMx);

	// invalid or not globally registered?
	if(m_ptr->m_globRef == mapItems.end())
		return;

	auto local_ptr(m_ptr); // might disappear
	lockguard fitemLock(*local_ptr);

	if ( -- m_ptr->usercount <= 0)
	{
		if(m_ptr->m_status < fileitem::FIST_COMPLETE && m_ptr->m_status != fileitem::FIST_INITED)
		{
			ldbg("usercount dropped to zero while downloading?: " << (int) m_ptr->m_status);
		}

		// some file items will be held ready for some time
		time_t when(0);
		if (MAXTEMPDELAY && m_ptr->m_bCheckFreshness &&
				m_ptr->m_status == fileitem::FIST_COMPLETE &&
				((when = m_ptr->m_nTimeDlStarted+MAXTEMPDELAY) > GetTime()))
		{
			cleaner::GetInstance().ScheduleFor(when, cleaner::TYPE_EXFILEITEM);
			return;
		}

		// nothing, let's put the item into shutdown state
		m_ptr->m_status = fileitem::FIST_DLSTOP;
		m_ptr->m_head.frontLine="HTTP/1.1 500 Cache file item expired";
		m_ptr->notifyAll();

		LOG("*this is last entry, deleting dl/fi mapping");
		mapItems.erase(m_ptr->m_globRef);
		m_ptr->m_globRef = mapItems.end();

		// make sure it's not double-unregistered accidentally!
		m_ptr.reset();
	}
}


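// Look up or create the globally shared file item for a cache path. An existing
// entry is reused and its usage count bumped; if an entry looks stuck (its
// download finished or failed more than cfg::stucksecs ago yet it is still
// registered) and the caller permits it, a replacement item under a mangled name
// in the _altStore area is used instead. Returns false only on allocation
// failure.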
bool fileItemMgmt::PrepareRegisteredFileItemWithStorage(cmstring &sPathUnescaped, bool bConsiderAltStore)
{
	LOGSTART2("fileitem::GetFileItem", sPathUnescaped);

	try
	{
		mstring sPathRel(fileitem_with_storage::NormalizePath(sPathUnescaped));
		lockguard lockGlobalMap(mapItemsMx);
		auto it = mapItems.find(sPathRel);
		if(it != mapItems.end())
		{
			if (bConsiderAltStore)
			{
				// detect items that got stuck somehow
				time_t now(GetTime());
				time_t extime(now - cfg::stucksecs);
				if (it->second->m_nTimeDlDone < extime)
				{
					// try to find its sibling which is in good state?
					for (; it!=mapItems.end() && it->first == sPathRel; ++it)
					{
						if (it->second->m_nTimeDlDone >= extime)
						{
							it->second->usercount++;
							LOG("Sharing an existing REPLACEMENT file item");
							m_ptr = it->second;
							return true;
						}
					}
					// ok, then create a modded name version in the replacement directory
					mstring sPathRelMod(sPathRel);
					replaceChars(sPathRelMod, "/\\", '_');
					sPathRelMod.insert(0, sReplDir + ltos(getpid()) + "_" + ltos(now) + "_");
					LOG("Registering a new REPLACEMENT file item...");
					auto sp(make_shared<fileitem_with_storage>(sPathRelMod, 1));
					sp->m_globRef = mapItems.insert(make_pair(sPathRel, sp));
					m_ptr = sp;
					return true;
				}
			}
			LOG("Sharing existing file item");
			it->second->usercount++;
			m_ptr = it->second;
			return true;
		}
		LOG("Registering the NEW file item...");
		auto sp(make_shared<fileitem_with_storage>(sPathRel, 1));
		sp->m_globRef = mapItems.insert(make_pair(sPathRel, sp));
		//lockGlobalMap.unLock();
		m_ptr = sp;
		return true;
	}
	catch(std::bad_alloc&)
	{
	}
	return false;
}

// make the fileitem globally accessible
bool fileItemMgmt::RegisterFileItem(tFileItemPtr spCustomFileItem)
{
	LOGSTART2("fileitem::RegisterFileItem", spCustomFileItem->m_sPathRel);

	if (!spCustomFileItem || spCustomFileItem->m_sPathRel.empty())
		return false;

	Unreg();

	lockguard lockGlobalMap(mapItemsMx);

	if(ContHas(mapItems, spCustomFileItem->m_sPathRel))
		return false; // conflict, another agent is already active

	spCustomFileItem->m_globRef = mapItems.emplace(spCustomFileItem->m_sPathRel,
			spCustomFileItem);
	spCustomFileItem->usercount = 1;
	m_ptr = spCustomFileItem;
	return true;
}

void fileItemMgmt::RegisterFileitemLocalOnly(fileitem* replacement)
{
	LOGSTART2("fileItemMgmt::ReplaceWithLocal", replacement);
	Unreg();
	m_ptr.reset(replacement);
}


// this method is supposed to be woken up periodically and detects items with a ref count
// manipulated by the request storm prevention mechanism. Items shall be dropped after some
// time if no thread other than ours is using them.
time_t fileItemMgmt::BackgroundCleanup()
{
	LOGSTART2s("fileItemMgmt::BackgroundCleanup", GetTime());
	lockguard lockGlobalMap(mapItemsMx);

	time_t now = GetTime();
	time_t oldestGet = END_OF_TIME;
	time_t expBefore = now - MAXTEMPDELAY;

	for(auto it = mapItems.begin(); it != mapItems.end();)
	{
		auto here = it++;

		// became busy again? Ignore this entry, its new master will take care of the deletion ASAP
		if(here->second->usercount >0)
			continue;

		// find and ignore (but remember) the candidate(s) for the next cycle
		if (here->second->m_nTimeDlStarted > expBefore)
		{
			oldestGet = std::min(time_t(here->second->m_nTimeDlStarted), oldestGet);
			continue;
		}

		// ok, unused and the delay is over. Destroy with the same sequence as the mgmt destructor
		// does, taking care of the lifetime.
		tFileItemPtr local_ptr(here->second);
		{
			lockguard g(*local_ptr);
			local_ptr->m_status = fileitem::FIST_DLSTOP;
			local_ptr->m_globRef = mapItems.end();
			mapItems.erase(here);
		}
		local_ptr->notifyAll();
	}

	if(oldestGet == END_OF_TIME)
		return oldestGet;

	ldbg(oldestGet);

	// preserving a few seconds to catch more of them in the subsequent run
	return std::max(oldestGet + MAXTEMPDELAY, GetTime() + 8);
}

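// Stream up to count bytes from the cache file to the client socket, preferring
// the kernel's sendfile() and falling back to a userspace copy where sendfile is
// unavailable or rejects the descriptor pair.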
ssize_t fileitem_with_storage::SendData(int out_fd, int in_fd, off_t &nSendPos, size_t count)
{
#ifndef HAVE_LINUX_SENDFILE
	return sendfile_generic(out_fd, in_fd, &nSendPos, count);
#else
	ssize_t r=sendfile(out_fd, in_fd, &nSendPos, count);

	if(r<0 && (errno == ENOSYS || errno == EINVAL))
		return sendfile_generic(out_fd, in_fd, &nSendPos, count);

	return r;
#endif
}

void fileItemMgmt::dump_status()
{
	tSS fmt;
	log::err("File descriptor table:\n");
	for(const auto& item : mapItems)
	{
		fmt.clear();
		fmt << "FREF: " << item.first << " [" << item.second->usercount << "]:\n";
		if(! item.second)
		{
			fmt << "\tBAD REF!\n";
			continue;
		}
		else
		{
			fmt << "\t" << item.second->m_sPathRel
					<< "\n\tDlRefCount: " << item.second->m_nDlRefsCount
					<< "\n\tState: " << (int)  item.second->m_status
					<< "\n\tFilePos: " << item.second->m_nIncommingCount << " , "
					<< item.second->m_nRangeLimit << " , "
					<< item.second->m_nSizeChecked << " , "
					<< item.second->m_nSizeSeen
					<< "\n\tGotAt: " << item.second->m_nTimeDlStarted << "\n\n";
		}
		log::err(fmt.c_str(), nullptr);
	}
	log::flush();
}

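// Drop any unverified tail on destruction; replacement items living under the
// _altStore prefix are throwaway copies, so their data and .head files are
// removed as well.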
fileitem_with_storage::~fileitem_with_storage()
{
	Truncate2checkedSize();

	if(startsWith(m_sPathRel, sReplDir))
	{
		::unlink(SZABSPATH(m_sPathRel));
		::unlink((SABSPATH(m_sPathRel)+".head").c_str());
	}
}

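// Shrink the open cache file to the number of bytes that have actually been
// verified so that unverified trailing data never survives in the cache.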
int fileitem_with_storage::Truncate2checkedSize()
{
	if(m_filefd == -1 || m_nSizeChecked<0)
		return -1;
	return ftruncate(m_filefd, m_nSizeChecked);
}

// special file? When it's rewritten from start, save the old version instead
int fileitem_with_storage::MoveRelease2Sidestore()
{
	if(m_nSizeChecked)
		return 0;
	if(!endsWithSzAr(m_sPathRel, "/InRelease") && !endsWithSzAr(m_sPathRel, "/Release"))
		return 0;
	auto srcAbs = CACHE_BASE + m_sPathRel;
	Cstat st(srcAbs);
	if(st)
	{
		auto tgtDir = CACHE_BASE + cfg::privStoreRelSnapSufix + sPathSep + GetDirPart(m_sPathRel);
		mkdirhier(tgtDir);
		auto sideFileAbs = tgtDir + ltos(st.st_ino) + ltos(st.st_mtim.tv_sec)
				+ ltos(st.st_mtim.tv_nsec);
		return FileCopy(srcAbs, sideFileAbs);
		//return rename(srcAbs.c_str(), sideFileAbs.c_str());
	}
	return 0;
}

#endif // MINIBUILD

}