# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
async def statfs(self, ctx):
    """Return file-system statistics computed from the metadata database.

    NOTE(review): this definition appears truncated — the final comment is
    cut off mid-sentence and no value is ever returned. A second ``statfs``
    definition follows below and would shadow this one at class-creation
    time, making this dead code. Confirm against the full file and remove.
    """
    log.debug('started')
    stat_ = pyfuse3.StatvfsData()
    # Get number of blocks & inodes
    blocks = self.db.get_val("SELECT COUNT(id) FROM objects")
    inodes = self.db.get_val("SELECT COUNT(id) FROM inodes")
    size = self.db.get_val('SELECT SUM(size) FROM blocks')
    # SQL SUM over an empty table yields NULL (-> None); normalize to 0.
    if size is None:
        size = 0
    # file system block size, i.e. the minimum amount of space that can
    # be allocated. This doesn't make much sense for S3QL, so we just
    # return the average size of stored blocks.
    stat_.f_frsize = max(4096, size // blocks) if blocks != 0 else 4096
    # This should actually be the "preferred block size for doing IO. However, `df` incorrectly
    # interprets f_blocks, f_bfree and f_bavail in terms of f_bsize rather than f_frsize as it
async def statfs(self, ctx):
    """Report file-system statistics to the kernel.

    The block-level figures (sizes and counts) are fixed placeholder
    values; the inode figures track the number of entries currently
    held in ``self._inode_map``.
    """
    stat_ = pyfuse3.StatvfsData()

    # Fixed block-size and block-count placeholders.
    stat_.f_bsize = 512
    stat_.f_frsize = 512
    stat_.f_blocks = 0
    stat_.f_bfree = 20
    stat_.f_bavail = 20

    # Inode counts are derived from the live inode map; the "+ 2"
    # headroom for free/available inodes mirrors the original stub.
    inode_count = len(self._inode_map)
    stat_.f_files = inode_count
    stat_.f_ffree = inode_count + 2
    stat_.f_favail = inode_count + 2

    return stat_