-rw-r--r--  SConstruct  228
1 file changed, 113 insertions, 115 deletions
diff --git a/SConstruct b/SConstruct
index e8f9ec2964..9d8dd7f5cb 100644
--- a/SConstruct
+++ b/SConstruct
@@ -491,122 +491,120 @@ else:
     for x in platform_list:
         print("\t" + x)
     print("\nPlease run scons again with argument: platform=<string>")
-    sys.exit(255)
-screen = sys.stdout
-node_count = 0
-node_count_max = 0
-node_count_interval = 1
-if ('env' in locals()):
+# The following only makes sense when the env is defined, and assumes it is
+if 'env' in locals():
+    screen = sys.stdout
+    # Progress reporting is not available in non-TTY environments since it
+    # messes with the output (for example, when writing to a file)
+    show_progress = (env['progress'] and sys.stdout.isatty())
+    node_count = 0
+    node_count_max = 0
+    node_count_interval = 1
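+    # The node count of each build is cached on disk (node_count_fname below)
+    # so that the next build can report progress against the previous total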
     node_count_fname = str(env.Dir('#')) + '/.scons_node_count'
-# Progress reporting is not available in non-TTY environments since it
-# messes with the output (for example, when writing to a file)
-if sys.stdout.isatty():
-    show_progress = env['progress']
-else:
-    show_progress = False
-
-import time, math
-
-class cache_progress:
-    # The default is 1 GB cache and 12 hours half life
-    def __init__(self, path = None, limit = 1073741824, half_life = 43200):
-        self.path = path
-        self.limit = limit
-        self.exponent_scale = math.log(2) / half_life
-        if env['verbose'] and path != None:
-            screen.write('Current cache limit is ' + self.convert_size(limit) + ' (used: ' + self.convert_size(self.get_size(path)) + ')\n')
-        self.delete(self.file_list())
-
-    def __call__(self, node, *args, **kw):
-        global node_count, node_count_max, node_count_interval, node_count_fname, show_progress
-        if show_progress:
-            # Print the progress percentage
-            node_count += node_count_interval
-            if (node_count_max > 0 and node_count <= node_count_max):
-                screen.write('\r[%3d%%] ' % (node_count * 100 / node_count_max))
-                screen.flush()
-            elif (node_count_max > 0 and node_count > node_count_max):
-                screen.write('\r[100%] ')
-                screen.flush()
+
+    import time, math
+
+    class cache_progress:
+        # The default is 1 GB cache and 12 hours half life
+        def __init__(self, path = None, limit = 1073741824, half_life = 43200):
+            self.path = path
+            self.limit = limit
+            self.exponent_scale = math.log(2) / half_life
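+            # With this decay rate, a weight of exp(-exponent_scale * age)
+            # halves every half_life seconds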
+            if env['verbose'] and path != None:
+                screen.write('Current cache limit is ' + self.convert_size(limit) + ' (used: ' + self.convert_size(self.get_size(path)) + ')\n')
+            self.delete(self.file_list())
+
+        def __call__(self, node, *args, **kw):
+            global node_count, node_count_max, node_count_interval, node_count_fname, show_progress
+            if show_progress:
+                # Print the progress percentage
+                node_count += node_count_interval
+                if (node_count_max > 0 and node_count <= node_count_max):
+                    screen.write('\r[%3d%%] ' % (node_count * 100 / node_count_max))
+                    screen.flush()
+                elif (node_count_max > 0 and node_count > node_count_max):
+                    screen.write('\r[100%] ')
+                    screen.flush()
+                else:
+                    screen.write('\r[Initial build] ')
+                    screen.flush()
+
+        def delete(self, files):
+            if len(files) == 0:
+                return
+            if env['verbose']:
+                # Utter something
+                screen.write('\rPurging %d %s from cache...\n' % (len(files), len(files) > 1 and 'files' or 'file'))
+            [os.remove(f) for f in files]
+
+        def file_list(self):
+            if self.path == None:
+                # Nothing to do
+                return []
+            # Gather a list of (filename, (size, atime)) within the
+            # cache directory
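+            # (os.stat()[6:8] selects st_size and st_atime from the stat tuple)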
+            file_stat = [(x, os.stat(x)[6:8]) for x in glob.glob(os.path.join(self.path, '*', '*'))]
+            if file_stat == []:
+                # Nothing to do
+                return []
+            # Annotate each entry with its age in seconds since the last
+            # access; the file size is assumed to be roughly proportional to
+            # its recompilation time, so old, large files are the cheapest to
+            # evict. The resulting entries are (filename, size, age).
+            current_time = time.time()
+            file_stat = [(x[0], x[1][0], (current_time - x[1][1])) for x in file_stat]
+            # Sort by the most recently accessed files (most sensible to keep) first
+            file_stat.sort(key=lambda x: x[2])
+            # Search for the first entry where the storage limit is
+            # reached
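+            # (everything from that entry onwards will be purged)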
+            sum, mark = 0, None
+            for i,x in enumerate(file_stat):
+                sum += x[1]
+                if sum > self.limit:
+                    mark = i
+                    break
+            if mark == None:
+                return []
             else:
-                screen.write('\r[Initial build] ')
-                screen.flush()
-
-    def delete(self, files):
-        if len(files) == 0:
-            return
-        if env['verbose']:
-            # Utter something
-            screen.write('\rPurging %d %s from cache...\n' % (len(files), len(files) > 1 and 'files' or 'file'))
-        [os.remove(f) for f in files]
-
-    def file_list(self):
-        if self.path == None:
-            # Nothing to do
-            return []
-        # Gather a list of (filename, (size, atime)) within the
-        # cache directory
-        file_stat = [(x, os.stat(x)[6:8]) for x in glob.glob(os.path.join(self.path, '*', '*'))]
-        if file_stat == []:
-            # Nothing to do
-            return []
-        # Weight the cache files by size (assumed to be roughly
-        # proportional to the recompilation time) times an exponential
-        # decay since the ctime, and return a list with the entries
-        # (filename, size, weight).
-        current_time = time.time()
-        file_stat = [(x[0], x[1][0], (current_time - x[1][1])) for x in file_stat]
-        # Sort by the most resently accessed files (most sensible to keep) first
-        file_stat.sort(key=lambda x: x[2])
-        # Search for the first entry where the storage limit is
-        # reached
-        sum, mark = 0, None
-        for i,x in enumerate(file_stat):
-            sum += x[1]
-            if sum > self.limit:
-                mark = i
-                break
-        if mark == None:
-            return []
-        else:
-            return [x[0] for x in file_stat[mark:]]
-
-    def convert_size(self, size_bytes):
-        if size_bytes == 0:
-            return "0 bytes"
-        size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
-        i = int(math.floor(math.log(size_bytes, 1024)))
-        p = math.pow(1024, i)
-        s = round(size_bytes / p, 2)
-        return "%s %s" % (int(s) if i == 0 else s, size_name[i])
-
-    def get_size(self, start_path = '.'):
-        total_size = 0
-        for dirpath, dirnames, filenames in os.walk(start_path):
-            for f in filenames:
-                fp = os.path.join(dirpath, f)
-                total_size += os.path.getsize(fp)
-        return total_size
-
-def progress_finish(target, source, env):
-    global node_count, progressor
-    with open(node_count_fname, 'w') as f:
-        f.write('%d\n' % node_count)
-    progressor.delete(progressor.file_list())
-
-try:
-    with open(node_count_fname) as f:
-        node_count_max = int(f.readline())
-except:
-    pass
-cache_directory = os.environ.get("SCONS_CACHE")
-# Simple cache pruning, attached to SCons' progress callback. Trim the
-# cache directory to a size not larger than cache_limit.
-cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
-progressor = cache_progress(cache_directory, cache_limit)
-Progress(progressor, interval = node_count_interval)
-
-progress_finish_command = Command('progress_finish', [], progress_finish)
-AlwaysBuild(progress_finish_command)
+                return [x[0] for x in file_stat[mark:]]
+
+        def convert_size(self, size_bytes):
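+            # Render a byte count with 1024-based units, e.g. 1536 -> '1.5 KB'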
+            if size_bytes == 0:
+                return "0 bytes"
+            size_name = ("bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+            i = int(math.floor(math.log(size_bytes, 1024)))
+            p = math.pow(1024, i)
+            s = round(size_bytes / p, 2)
+            return "%s %s" % (int(s) if i == 0 else s, size_name[i])
+
+        def get_size(self, start_path = '.'):
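+            # Recursively sum the size of every file under start_path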
+            total_size = 0
+            for dirpath, dirnames, filenames in os.walk(start_path):
+                for f in filenames:
+                    fp = os.path.join(dirpath, f)
+                    total_size += os.path.getsize(fp)
+            return total_size
+
+    def progress_finish(target, source, env):
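+        # Build-end action: persist the final node count for the next run,
+        # then prune the cache down to its size limit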
+        global node_count, progressor
+        with open(node_count_fname, 'w') as f:
+            f.write('%d\n' % node_count)
+        progressor.delete(progressor.file_list())
+
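+    # Seed the progress estimate with the node count recorded by the
+    # previous build, if any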
+    try:
+        with open(node_count_fname) as f:
+            node_count_max = int(f.readline())
+    except:
+        pass
+
+    cache_directory = os.environ.get("SCONS_CACHE")
+    # Simple cache pruning, attached to SCons' progress callback. Trim the
+    # cache directory to a size not larger than cache_limit.
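+    # SCONS_CACHE_LIMIT is given in MiB (default 1024, i.e. 1 GiB) and
+    # converted to bytes here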
+    cache_limit = float(os.getenv("SCONS_CACHE_LIMIT", 1024)) * 1024 * 1024
+    progressor = cache_progress(cache_directory, cache_limit)
+    Progress(progressor, interval = node_count_interval)
+
+    progress_finish_command = Command('progress_finish', [], progress_finish)
+    AlwaysBuild(progress_finish_command)
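
For reference, the pruning policy that file_list() and delete() implement reduces to the standalone sketch below: keep cache entries in order of most recent access until their cumulative size exceeds the limit, then evict the rest. The two-level '*/*' glob mirrors SCons' cache directory layout; prune_candidates, cache_dir and limit_bytes are illustrative names, not part of the build script.

import glob, os, time

def prune_candidates(cache_dir, limit_bytes):
    # (path, size, seconds since last access) for every cached object file
    now = time.time()
    entries = [(p, os.stat(p).st_size, now - os.stat(p).st_atime)
               for p in glob.glob(os.path.join(cache_dir, '*', '*'))]
    # Most recently accessed first: those are the most valuable to keep
    entries.sort(key=lambda e: e[2])
    total = 0
    for i, (path, size, age) in enumerate(entries):
        total += size
        if total > limit_bytes:
            # The size budget is exhausted at this entry; it and everything
            # older would be deleted
            return [p for p, _, _ in entries[i:]]
    return []

With limit_bytes = 1024 * 1024 * 1024 this matches the default SCONS_CACHE_LIMIT of 1024 MiB used above.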