gluster-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Gluster-devel] performance seems extremely bad


From: Dale Dude
Subject: [Gluster-devel] performance seems extremely bad
Date: Thu, 05 Jul 2007 12:43:23 -0400
User-agent: Thunderbird 2.0.0.5pre (Windows/20070704)

Kernel 2.6.15; GlusterFS mainline-2.5, patch 275; FUSE 2.6.5.

Tested with: dbench -t 10 10. Is performance supposed to be this bad?

Glusterfs /volumes: Throughput 15.8983 MB/sec 10 procs

Bypassing glusterfs, direct to /volume1: Throughput 65.0482 MB/sec 10 procs

Bypassing glusterfs, direct to /volume2: Throughput 66.5139 MB/sec 10 procs



=============
client.vol:

volume server1
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume volumenamespace
end-volume

volume server1vol1
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs1
end-volume


volume server1vol2
        type protocol/client
        option transport-type tcp/client     # for TCP/IP transport
        option remote-host 127.0.0.1     # IP address of the remote brick
        option remote-subvolume clusterfs2
end-volume

volume bricks
 type cluster/unify
 option namespace server1
 option readdir-force-success on  # ignore failed mounts
 subvolumes server1vol1 server1vol2

 option scheduler rr
 option rr.limits.min-free-disk 5 #%
end-volume

volume writebehind   #writebehind improves write performance a lot
 type performance/write-behind
 option aggregate-size 131072 # in bytes
 subvolumes bricks
end-volume

volume readahead
 type performance/read-ahead
 option page-size 65536     # unit in bytes
 option page-count 16       # cache per file  = (page-count x page-size)
 subvolumes writebehind
end-volume

volume iothreads
  type performance/io-threads
  option thread-count 32
  subvolumes readahead
end-volume

==============================
server.vol:

volume volume1
 type storage/posix
 option directory /volume1
end-volume

#volume posixlocks1
 #type features/posix-locks
 #option mandatory on          # enables mandatory locking on all files
 #subvolumes volume1
#end-volume

volume clusterfs1
  type performance/io-threads
  option thread-count 16
  subvolumes volume1
end-volume

#######

volume volume2
 type storage/posix
 option directory /volume2
end-volume

#volume posixlocks2
 #type features/posix-locks
 #option mandatory on          # enables mandatory locking on all files
 #subvolumes volume2
#end-volume

volume clusterfs2
  type performance/io-threads
  option thread-count 16
  subvolumes volume2
end-volume

#######

volume volumenamespace
 type storage/posix
 option directory /volume.namespace
end-volume

###

volume clusterfs
 type protocol/server
 option transport-type tcp/server
 subvolumes clusterfs1 clusterfs2 volumenamespace
 option auth.ip.clusterfs1.allow *
 option auth.ip.clusterfs2.allow *
 option auth.ip.volumenamespace.allow *
end-volume




reply via email to

[Prev in Thread] Current Thread [Next in Thread]