
def s3ql::block_cache::BlockCache::get(self, inode, blockno)
Get file handle for block `blockno` of `inode`

This method releases the global lock.

Note: if `get` and `remove` are called concurrently, then it is
possible that a block that has been requested with `get` and
passed to `remove` for deletion will not be deleted.

Definition at line 194 of file block_cache.py.
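
`get` is a generator meant to be used as a context manager: the caller enters it with the global lock held and receives the block's file handle. A minimal usage sketch (`cache`, `inode`, `blockno`, `offset` and `length` are illustrative names, not part of this API):

    with cache.get(inode, blockno) as fh:
        fh.seek(offset)
        data = fh.read(length)

On exit, the handle is flushed and the cache size accounting is updated.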

    @contextmanager
    def get(self, inode, blockno):
        """Get file handle for block `blockno` of `inode`
        
        This method releases the global lock.
        
        Note: if `get` and `remove` are called concurrently, then it is
        possible that a block that has been requested with `get` and
        passed to `remove` for deletion will not be deleted.
        """

        log.debug('get(inode=%d, block=%d): start', inode, blockno)

        if self.size > self.max_size or len(self.cache) > self.max_entries:
            self.expire()

        # Need to release global lock to acquire mlock to prevent deadlocking
        lock.release()
        with self.mlock(inode, blockno):
            lock.acquire()
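            # The per-block mlock serializes concurrent accesses to this
            # (inode, blockno) pair; acquiring it before re-taking the
            # global lock keeps the lock ordering deadlock-free.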
            
            try:
                el = self.cache[(inode, blockno)]
    
            # Not in cache
            except KeyError:
                filename = os.path.join(self.cachedir,
                                        'inode_%d_block_%d' % (inode, blockno))
                try:
                    obj_id = self.db.get_val("SELECT obj_id FROM blocks WHERE inode=? AND blockno=?",
                                             (inode, blockno))
    
                # No corresponding object
                except NoSuchRowError:
                    log.debug('get(inode=%d, block=%d): creating new block', inode, blockno)
                    el = CacheEntry(inode, blockno, None, filename, "w+b")
    
                # Need to download corresponding object
                else:
                    log.debug('get(inode=%d, block=%d): downloading block', inode, blockno)
                    el = CacheEntry(inode, blockno, obj_id, filename, "w+b")
                    with lock_released:
                        try:
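                            # An eventually consistent backend may briefly
                            # report a freshly created object as missing;
                            # retry on NoSuchObject in that case instead of
                            # failing outright.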
                            if self.bucket.read_after_create_consistent():
                                self.bucket.fetch_fh('s3ql_data_%d' % obj_id, el)
                            else:
                                retry_exc(300, [ NoSuchObject ], self.bucket.fetch_fh,
                                          's3ql_data_%d' % obj_id, el)
                        except:
                            os.unlink(filename)
                            raise
                        
                    # Writing will have set dirty flag
                    el.dirty = False
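                    # Cache files are stored with a '.d' suffix while
                    # dirty; drop it now that the block matches the
                    # backend object.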
                    os.rename(el.name + '.d', el.name)
                    
                    self.size += os.fstat(el.fileno()).st_size
    
                self.cache[(inode, blockno)] = el
    
            # In Cache
            else:
                log.debug('get(inode=%d, block=%d): in cache', inode, blockno)
                self.cache.to_head((inode, blockno))

        
        el.last_access = time.time()
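        # Remember the file size before handing out the fh; the delta is
        # applied to the cache size total once the caller is done.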
        oldsize = os.fstat(el.fileno()).st_size

        # Provide fh to caller
        try:
            log.debug('get(inode=%d, block=%d): yield', inode, blockno)
            yield el
        finally:
            # Update cachesize
            el.flush()
            newsize = os.fstat(el.fileno()).st_size
            self.size += newsize - oldsize

        log.debug('get(inode=%d, block=%d): end', inode, blockno)
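
The download path above retries through `retry_exc` when the backend is not read-after-create consistent. s3ql defines that helper elsewhere; a minimal sketch matching the call shape used here, `retry_exc(timeout, exc_types, fn, *args)`, might look like the following (the back-off constants are illustrative assumptions):

    import time

    def retry_exc(timeout, exc_types, fn, *args):
        """Call fn(*args), retrying while it raises one of exc_types.

        Retries with exponential back-off until `timeout` seconds have
        elapsed, then lets the exception propagate to the caller.
        """
        deadline = time.time() + timeout
        interval = 0.2
        while True:
            try:
                return fn(*args)
            except tuple(exc_types):
                if time.time() > deadline:
                    raise
            time.sleep(interval)
            interval = min(2 * interval, 30)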


