Fix ccache-swig internal error bug due to premature file cleanup

Fixes SF bug 1319 which shows up as a failure in the ccache tests on
Debian 64 bit Wheezy, possibly because ENABLE_ZLIB is defined.

This bug is due to files being too aggressively cleaned up part way
through the caching. The .stderr file is cached and then retrieved
from the cache for displaying to stderr. However, the stats are updated
between caching and using the .stderr file. During the stats update the
cache is cleaned and the newly cached files can be removed if the max
number of files per directory is low. Really the cache should be cleaned
up at exit to solve this (as is done in ccache-3.1). The workaround fix
ensures the cached files are ignored during cleanup, which is a bit
tricky as sometimes files from a previous run have the same time stamp,
so they don't appear to be the oldest in the cache.
This commit is contained in:
William Fulton 2013-05-19 00:44:06 +01:00
commit 4ba9365e0f
3 changed files with 27 additions and 6 deletions

View file

@@ -159,7 +159,7 @@ int asprintf(char **ptr, const char *format, ...);
int snprintf(char *,size_t ,const char *, ...);
#endif
void cleanup_dir(const char *dir, size_t maxfiles, size_t maxsize);
void cleanup_dir(const char *dir, size_t maxfiles, size_t maxsize, size_t minfiles);
void cleanup_all(const char *dir);
void wipe_all(const char *dir);

View file

@@ -75,21 +75,40 @@ static void traverse_fn(const char *fname, struct stat *st)
/* sort the files we've found and delete the oldest ones until we are
below the thresholds */
static void sort_and_clean(void)
static void sort_and_clean(size_t minfiles)
{
unsigned i;
size_t adjusted_minfiles = minfiles;
if (num_files > 1) {
/* sort in ascending data order */
qsort(files, num_files, sizeof(struct files *),
(COMPAR_FN_T)files_compare);
}
/* ensure newly cached files (minfiles) are kept - instead of matching
the filenames of those newly cached, a faster and simpler approach
assumes these are the most recent in the cache and if any other
cached files have an identical time stamp, they will also be kept -
this approach would not be needed if the cleanup was done at exit. */
if (minfiles != 0 && minfiles < num_files) {
unsigned minfiles_index = num_files - minfiles;
time_t minfiles_time = files[minfiles_index]->mtime;
for (i=1; i<=minfiles_index; i++) {
if (files[minfiles_index-i]->mtime == minfiles_time)
adjusted_minfiles++;
else
break;
}
}
/* delete enough files to bring us below the threshold */
for (i=0;i<num_files; i++) {
if ((size_threshold==0 || total_size < size_threshold) &&
(files_threshold==0 || (num_files-i) < files_threshold)) break;
if (adjusted_minfiles != 0 && num_files-i <= adjusted_minfiles)
break;
if (unlink(files[i]->fname) != 0 && errno != ENOENT) {
fprintf(stderr, "unlink %s - %s\n",
files[i]->fname, strerror(errno));
@@ -103,7 +122,7 @@ static void sort_and_clean(void)
}
/* cleanup in one cache subdir */
void cleanup_dir(const char *dir, size_t maxfiles, size_t maxsize)
void cleanup_dir(const char *dir, size_t maxfiles, size_t maxsize, size_t minfiles)
{
unsigned i;
@@ -117,7 +136,7 @@ void cleanup_dir(const char *dir, size_t maxfiles, size_t maxsize)
traverse(dir, traverse_fn);
/* clean the cache */
sort_and_clean();
sort_and_clean(minfiles);
stats_set_sizes(dir, total_files, total_size);
@@ -151,7 +170,8 @@ void cleanup_all(const char *dir)
cleanup_dir(dname,
counters[STATS_MAXFILES],
counters[STATS_MAXSIZE]);
counters[STATS_MAXSIZE],
0);
free(dname);
free(sfile);
}

View file

@@ -168,7 +168,8 @@ static void stats_update_size(enum stats stat, size_t size, size_t numfiles)
if (need_cleanup) {
char *p = dirname(stats_file);
cleanup_dir(p, counters[STATS_MAXFILES], counters[STATS_MAXSIZE]);
cleanup_dir(p, counters[STATS_MAXFILES], counters[STATS_MAXSIZE],
numfiles);
free(p);
}
}