Sourcecode: s3ql

def s3ql::fs::Operations::copy_tree(self, src_id, target_id)
Efficiently copy directory tree

Definition at line 358 of file fs.py.
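
A rough usage sketch (hypothetical, not taken from s3ql itself): assuming that
`ops` is an initialized s3ql.fs.Operations instance and that src_id and
target_id are the inode ids of an existing source directory and of an already
created destination directory, the call would look like this:

    # Hypothetical invocation. In the running filesystem the global FUSE lock
    # is held at this point, since copy_tree periodically yields it via
    # llfuse.lock.yield_().
    ops.copy_tree(src_id, target_id)   # raises FUSEError(errno.ENOSPC) when out of inodes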

    def copy_tree(self, src_id, target_id):
        '''Efficiently copy directory tree'''

        log.debug('copy_tree(%d, %d): start', src_id, target_id)

        # To avoid lookups and make code tidier
        make_inode = self.inodes.create_inode
        db = self.db
                
        # First we make sure that all blocks are in the database
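        # (blocks that exist only as dirty cache entries would otherwise be
        # missing from the blocks table queried below)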
        self.cache.commit()
        log.debug('copy_tree(%d, %d): committed cache', src_id, target_id)

        # Copy target attributes
        src_inode = self.inodes[src_id]
        target_inode = self.inodes[target_id]
        for attr in ('atime', 'ctime', 'mtime', 'mode', 'uid', 'gid'):
            setattr(target_inode, attr, getattr(src_inode, attr))

        # We first replicate into a dummy inode 
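        # (the copied tree stays attached to this invisible inode and is only
        # re-attached to the real target at the very end, see 'Make
        # replication visible' below)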
        timestamp = time.time()
        tmp = make_inode(mtime=timestamp, ctime=timestamp, atime=timestamp,
                         uid=0, gid=0, mode=0, refcount=0)
        
        queue = [ (src_id, tmp.id, 0) ]
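        # Maps ids of already copied source inodes to the ids of their copies,
        # so that inodes with multiple hard links are copied only once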
        id_cache = dict()
        processed = 0 # Number of steps since last GIL release
        stamp = time.time() # Time of last GIL release
        gil_step = 100 # Approx. number of steps between GIL releases
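        # (source inode, blockno) pairs whose objects are still being uploaded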
        in_transit = set()
        while queue:
            (src_id, target_id, rowid) = queue.pop()
            log.debug('copy_tree(%d, %d): Processing directory (%d, %d, %d)', 
                      src_inode.id, target_inode.id, src_id, target_id, rowid)
            for (name, id_, rowid) in db.query('SELECT name, inode, rowid FROM contents '
                                               'WHERE parent_inode=? AND rowid > ? '
                                               'ORDER BY rowid', (src_id, rowid)):

                if id_ not in id_cache:
                    inode = self.inodes[id_]
    
                    try:
                        inode_new = make_inode(refcount=1, mode=inode.mode, size=inode.size,
                                               uid=inode.uid, gid=inode.gid,
                                               mtime=inode.mtime, atime=inode.atime,
                                               ctime=inode.ctime, target=inode.target,
                                               rdev=inode.rdev)
                    except OutOfInodesError:
                        log.warn('Could not find a free inode')
                        raise FUSEError(errno.ENOSPC)
    
                    id_new = inode_new.id
    
                    if inode.refcount != 1:
                        id_cache[id_] = id_new
    
                    for (obj_id, blockno) in db.query('SELECT obj_id, blockno FROM blocks '
                                                      'WHERE inode=?', (id_,)):
                        processed += 1
                        db.execute('INSERT INTO blocks (inode, blockno, obj_id) VALUES(?, ?, ?)',
                                   (id_new, blockno, obj_id))
                        db.execute('UPDATE objects SET refcount=refcount+1 WHERE id=?', (obj_id,))
                        
                        if (id_, blockno) in self.cache.upload_manager.in_transit:
                            in_transit.add((id_, blockno))
    
                    if db.has_val('SELECT 1 FROM contents WHERE parent_inode=?', (id_,)):
                        queue.append((id_, id_new, 0))
                else:
                    id_new = id_cache[id_]
                    self.inodes[id_new].refcount += 1
    
                db.execute('INSERT INTO contents (name, inode, parent_inode) VALUES(?, ?, ?)',
                           (name, id_new, target_id))
                
                processed += 1
                
                if processed > gil_step:
                    log.debug('copy_tree(%d, %d): Requeueing (%d, %d, %d) to yield lock', 
                              src_inode.id, target_inode.id, src_id, target_id, rowid)
                    queue.append((src_id, target_id, rowid))
                    break
            
            if processed > gil_step:
                dt = time.time() - stamp
                gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 1)
                log.debug('copy_tree(%d, %d): Adjusting gil_step to %d', 
                          src_inode.id, target_inode.id, gil_step) 
                processed = 0
                llfuse.lock.yield_()
                stamp = time.time()
  
        # If we replicated blocks whose associated objects were still in
        # transit, we have to wait for the transit to complete before we make
        # the replicated tree visible to the user. Otherwise access to the newly
        # created blocks will raise a NoSuchObject exception.
        while in_transit:
            log.debug('copy_tree(%d, %d): in_transit: %s', 
                      src_inode.id, target_inode.id, in_transit)
            in_transit = [ x for x in in_transit 
                           if x in self.cache.upload_manager.in_transit ]
            if in_transit:
                self.cache.upload_manager.join_one()

            
        # Make replication visible
        self.db.execute('UPDATE contents SET parent_inode=? WHERE parent_inode=?',
                     (target_inode.id, tmp.id))
        del self.inodes[tmp.id]
        llfuse.invalidate_inode(target_inode.id)
        
        log.debug('copy_tree(%d, %d): end', src_inode.id, target_inode.id)
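
The lock handling in the main loop is worth noting: instead of releasing the
global FUSE lock at a fixed rate, the method adapts gil_step so that roughly
GIL_RELEASE_INTERVAL seconds of work happen between two releases. The
following stand-alone sketch restates just that throttling idea; the
GIL_RELEASE_INTERVAL value and the process_queue/do_work names are made up
for illustration (the real constant is defined elsewhere in fs.py).

    import time
    import llfuse

    GIL_RELEASE_INTERVAL = 0.25   # assumed value, for illustration only

    def process_queue(queue, do_work):
        '''Drain *queue*, calling do_work() on every item, and release the
        global FUSE lock roughly every GIL_RELEASE_INTERVAL seconds.'''

        processed = 0          # steps since the lock was last released
        stamp = time.time()    # time of the last release
        gil_step = 100         # current estimate of steps per interval

        while queue:
            do_work(queue.pop())
            processed += 1

            if processed > gil_step:
                # Rescale gil_step so that about GIL_RELEASE_INTERVAL seconds
                # of work pass between two lock releases, then yield the lock.
                dt = time.time() - stamp
                gil_step = max(int(gil_step * GIL_RELEASE_INTERVAL / dt), 1)
                processed = 0
                llfuse.lock.yield_()
                stamp = time.time()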


