Logo Search packages:      
Sourcecode: s3ql version File versions  Download package

s3ql.cli.mount.main(args=None)
Mount S3QL file system

Definition at line 40 of file mount.py.

                   :
    '''Mount S3QL file system'''

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    fuse_opts = get_fuse_opts(options)
    
    # Save handler so that we can remove it when daemonizing
    stdout_log_handler = setup_logging(options, 'mount.log')
    
    if not os.path.exists(options.mountpoint):
        raise QuietError('Mountpoint does not exist.')
        
    if options.profile:
        import cProfile
        import pstats
        prof = cProfile.Profile()

    with get_backend(options.storage_url, options.homedir,
                     options.ssl) as (conn, bucketname):

        if not bucketname in conn:
            raise QuietError("Bucket does not exist.")
        bucket = conn.get_bucket(bucketname, compression=options.compress)

        # Unlock bucket
        try:
            unlock_bucket(options.homedir, options.storage_url, bucket)
        except ChecksumError:
            raise QuietError('Checksum error - incorrect password?')

        # Get paths
        home = get_bucket_home(options.storage_url, options.homedir)

        # Retrieve metadata
        (param, db) = get_metadata(bucket, home)
        
        metadata_upload_thread = MetadataUploadThread(bucket, param, db,
                                                      options.metadata_upload_interval)
        operations = fs.Operations(bucket, db, cachedir=home + '-cache', 
                                   blocksize=param['blocksize'],
                                   cache_size=options.cachesize * 1024,
                                   upload_event=metadata_upload_thread.event,
                                   cache_entries=options.max_cache_entries)
        
        log.info('Mounting filesystem...')
        llfuse.init(operations, options.mountpoint, fuse_opts)
        try:
            if not options.fg:
                conn.prepare_fork()
                me = threading.current_thread()
                for t in threading.enumerate():
                    if t is me:
                        continue
                    log.error('Waiting for thread %s', t)
                    t.join()
  
                if stdout_log_handler:
                    logging.getLogger().removeHandler(stdout_log_handler)
                daemonize(options.homedir)
                conn.finish_fork()
            
            metadata_upload_thread.start()
            if options.upstart:
                os.kill(os.getpid(), signal.SIGSTOP)
            if options.profile:
                prof.runcall(llfuse.main, options.single)
            else:
                llfuse.main(options.single)

        finally:
            llfuse.close()
            metadata_upload_thread.stop()
                
        db_mtime = metadata_upload_thread.db_mtime
        
        if operations.encountered_errors:
            param['needs_fsck'] = True
        else:       
            param['needs_fsck'] = False
         
        # Do not update .params yet, dump_metadata() may
        # fail if the database is corrupted, in which case we
        # want to force an fsck.
           
        seq_no = get_seq_no(bucket)
        if db_mtime == os.stat(home + '.db').st_mtime:
            log.info('File system unchanged, not uploading metadata.')
            del bucket['s3ql_seq_no_%d' % param['seq_no']]         
            param['seq_no'] -= 1
            pickle.dump(param, open(home + '.params', 'wb'), 2)         
        elif seq_no == param['seq_no']:
            log.info('Saving metadata...')
            fh = tempfile.TemporaryFile()
            dump_metadata(fh, db)          
            log.info("Compressing & uploading metadata..")
            cycle_metadata(bucket)
            fh.seek(0)
            param['last-modified'] = time.time() - time.timezone
            bucket.store_fh("s3ql_metadata", fh, param)
            fh.close()
            pickle.dump(param, open(home + '.params', 'wb'), 2)
        else:
            log.error('Remote metadata is newer than local (%d vs %d), '
                      'refusing to overwrite!', seq_no, param['seq_no'])
            log.error('The locally cached metadata will be *lost* the next time the file system '
                      'is mounted or checked and has therefore been backed up.')
            for name in (home + '.params', home + '.db'):
                for i in reversed(range(4)):
                    if os.path.exists(name + '.%d' % i):
                        os.rename(name + '.%d' % i, name + '.%d' % (i+1))     
                os.rename(name, name + '.0')
   
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close() 

    if options.profile:
        tmp = tempfile.NamedTemporaryFile()
        prof.dump_stats(tmp.name)
        fh = open('s3ql_profile.txt', 'w')
        p = pstats.Stats(tmp.name, stream=fh)
        tmp.close()
        p.strip_dirs()
        p.sort_stats('cumulative')
        p.print_stats(50)
        p.sort_stats('time')
        p.print_stats(50)
        fh.close()

    if operations.encountered_errors:
        raise QuietError('Some errors were encountered while the file system was mounted,\n'
                         'you should run fsck.s3ql and examine ~/.s3ql/mount.log.')



Generated by  Doxygen 1.6.0   Back to index