Opened 13 years ago

Last modified 9 years ago

#1008 new task

IZ67: Testsuite needs to test multiple functions simultaneously

Reported by: joga Owned by:
Priority: normal Milestone:
Component: testsuite Version: current
Severity: Keywords: tests
Cc:

Description

[Imported from gridengine issuezilla http://gridengine.sunsource.net/issues/show_bug.cgi?id=67]

        Issue #:      67              Platform:     All           Reporter: joga (joga)
       Component:     testsuite          OS:        All
     Subcomponent:    tests           Version:      current          CC:    None defined
        Status:       NEW             Priority:     P3
      Resolution:                    Issue type:    TASK
                                  Target milestone: milestone 1
      Assigned to:    issues@testsuite
      QA Contact:     joga
          URL:
       * Summary:     Testsuite needs to test multiple functions simultaneously
   Status whiteboard:
      Attachments:

     Issue 67 blocks:
   Votes for issue 67:     Vote for this issue


   Opened: Tue Nov 21 01:46:00 -0700 2006 
------------------------


This was IZ 1717 in project gridengine.

Testsuite needs to test multiple functions simultaneously.
Create stress tests, e.g. in system_tests/stress/...

One way to accomplish this is to execute several tests
at once.

In particular, here is an example which stresses the queue
creation of Grid Engine, and at the same time performs some other
operations involving qconf.


#!/bin/sh -x

###############################################################################
# Copyright:    2005 by Sun Microsystems, Inc. All Rights Reserved.
# Purpose:      consolidate setup, configure in one script
# Usage:        queue_qstat_stress.sh <cluster queue number>
# File:         @(#)queue_qstat_stress.sh   1.6 06/24/05
# Author: Ovid Jacob
# Modifications:
###############################################################################

# The syntax will be
# queue_qstat_stress.sh <cluster queue number>
#       <cluster queue number> - the number of cluster queues we want to
#                                configure
# The subroutines will be named appropriately:


setup(){

# One-time cluster preparation: install a scheduler configuration,
# create projects project1..project4, build a share tree over them,
# register a custom "license1" complex, grant manager and deadline-user
# rights to the demo user, and add a "special.q" queue subordinate to all.q.

# enter the name of a demo user
DEMOUSER=

# DO NOT MODIFY BELOW THIS LINE

TMP=/tmp/setup.$$

# modify scheduler ('>' rather than '>>' so a stale temp file cannot
# leak old content into the configuration)
cat > "$TMP" << SCHED_EOF
algorithm                         default
schedule_interval                 0:0:15
maxujobs                          0
queue_sort_method                 load
job_load_adjustments              np_load_avg=2.00
load_adjustment_decay_time        0:2:30
load_formula                      np_load_avg
schedd_job_info                   true
flush_submit_sec                  1
flush_finish_sec                  1
params                            none
reprioritize_interval             0:2:0
halftime                          20
usage_weight_list                 cpu=1.000000,mem=0.000000,io=0.000000
compensation_factor               5.000000
weight_user                       0.250000
weight_project                    0.250000
weight_department                 0.250000
weight_job                        0.250000
weight_tickets_functional         10000
weight_tickets_share              100000
share_override_tickets            TRUE
share_functional_shares           TRUE
max_functional_jobs_to_schedule   200
report_pjob_tickets               TRUE
max_pending_tasks_per_job         50
halflife_decay_list               none
policy_hierarchy                  OFS
weight_ticket                     0.010000
weight_waiting_time               0.100000
weight_deadline                   3600000.000000
weight_urgency                    0.100000
weight_priority                   1.000000
max_reservation                   0
default_duration                  0:10:0
SCHED_EOF

qconf -Msconf "$TMP"
rm -f "$TMP"

# add projects
cat > "$TMP" << PRJ1_EOF
name project1
oticket 0
fshare 20
acl NONE
xacl NONE
PRJ1_EOF

qconf -Aprj "$TMP"
rm -f "$TMP"

cat > "$TMP" << PRJ2_EOF
name project2
oticket 0
fshare 30
acl NONE
xacl NONE
PRJ2_EOF

qconf -Aprj "$TMP"
rm -f "$TMP"

cat > "$TMP" << PRJ3_EOF
name project3
oticket 10000
fshare 20
acl NONE
xacl NONE
PRJ3_EOF

qconf -Aprj "$TMP"
rm -f "$TMP"

cat > "$TMP" << PRJ4_EOF
name project4
oticket 0
fshare 30
acl NONE
xacl NONE
PRJ4_EOF

qconf -Aprj "$TMP"
rm -f "$TMP"


# create sharetree: Root -> groupA (project1, project2),
#                           groupB (project3, project4)
cat > "$TMP" << TREE_EOF
id=0
name=Root
type=0
shares=1
childnodes=1,4
id=1
name=groupA
type=0
shares=60
childnodes=2,3
id=2
name=project1
type=1
shares=50
childnodes=NONE
id=3
name=project2
type=1
shares=50
childnodes=NONE
id=4
name=groupB
type=0
shares=40
childnodes=5,6
id=5
name=project3
type=1
shares=25
childnodes=NONE
id=6
name=project4
type=1
shares=75
childnodes=NONE
TREE_EOF

qconf -Astree "$TMP"
rm -f "$TMP"

# add custom resource: append a new complex to the existing ones
# (the '>>' here is deliberate — we extend the 'qconf -sc' dump)
qconf -sc > "$TMP"
cat >> "$TMP" << CMPLX_EOF
license1            lic1       INT         <=    YES         NO         0        100
CMPLX_EOF

qconf -Mc "$TMP"
rm -f "$TMP"

# fall back to root when no demo user was entered above
USER=${DEMOUSER:-root}
# make Demo User a manager
qconf -am "$USER"

# make Demo User a deadlineuser
qconf -su deadlineusers | grep -v entries > "$TMP"
echo "entries $USER root" >> "$TMP"
qconf -Mu "$TMP"
rm -f "$TMP"

# add another cluster queue: clone all.q's configuration, dropping the
# fields overridden below.  BUGFIX: the original split this pipeline
# across two lines without a continuation, leaving 'grep -v' with no
# pattern and a stray 'load_thresholds' command.
qconf -sq all.q \
    | grep -v -e qname -e subordinate_list -e load_thresholds -e complex_values > "$TMP"
echo "qname     special.q" >> "$TMP"
echo "subordinate_list  all.q=2" >> "$TMP"
echo "load_thresholds  np_load_avg=1.25" >> "$TMP"
echo "complex_values   license1=1" >> "$TMP"

qconf -Aq "$TMP"
rm -f "$TMP"


}


cluster_setup(){

# Create cluster queues queue<i>.q .. queue<max>.q, each cloned from
# all.q and made superordinate to it.
# Arguments: $1 - first queue index (default 1)
#            $2 - last queue index  (default 20)

TMP=/tmp/cluster_queue.$$

i=${1:-1}
max=${2:-20}

while [ "$i" -le "$max" ]
do

j=$((i + 1))
# clone all.q's configuration, dropping the fields overridden below.
# BUGFIX: the original split this pipeline across two lines without a
# continuation, which broke the command entirely.
qconf -sq all.q \
    | grep -v -e qname -e subordinate_list -e load_thresholds -e complex_values > "$TMP"
echo "qname     queue${i}.q" >> "$TMP"
echo "subordinate_list  all.q=${j}" >> "$TMP"
echo "load_thresholds  np_load_avg=1.25" >> "$TMP"
echo "complex_values   license1=1" >> "$TMP"

qconf -Aq "$TMP"
rm -f "$TMP"


i=$((i + 1))
done

# BUGFIX: temp files live in /tmp, not the current directory
rm -f /tmp/cluster_queue.*

#qconf -sql


}



cluster_remove(){

# Remove the queue<i>.q queues created by cluster_setup, keeping all.q.
# Arguments: $1 - first queue index (default 1)

i=${1:-1}

# number of configured cluster queues
max=`qconf -sql | wc -l`

# don't remove all.q
max=$((max - 1))

while [ "$i" -le "$max" ]
do

qconf -dq "queue${i}.q"

i=$((i + 1))
done

# BUGFIX: cluster_setup's temp files live in /tmp, not the current directory
rm -f /tmp/cluster_queue.*

#qconf -sql


}

qstat_loop(){

# Poll 'qstat -xml' forever, gzipping each snapshot to /var/tmp.
# Never returns; intended to be run in the background and killed
# externally.

while true
do
        rm -f /var/tmp/qstat.txt.$$.gz

        # printf instead of echo: 'echo "... \n"' printed a literal
        # backslash-n under bash
        printf 'qstat now....\n'

        qstat -F -ext -urg -pri -r -xml | gzip -c > \
       /var/tmp/qstat.txt.$$.gz

done
}


user_setup_loop(){

# Endlessly re-register two users and an access list to stress
# qmaster's user handling.  Never returns; run it in the background.

while :
do
        qconf -Auser root.user
        qconf -Auser sgeadmin.user
        qconf -au root,sgeadmin RunUsers
done

}



project_setup_loop(){

# Endlessly re-add seven projects (definition files expected in the
# current directory) to stress qmaster's project handling.
# Never returns; intended to be run in the background.

# hoist the invariant `pwd` out of the loop — the original spawned a
# subshell for every single qconf call; also $( ) over backticks
here=$(pwd)

while true
do

for prj in amd64 i86 new rest solaris32 solaris64 x86solaris
do
        qconf -Aprj "${here}/${prj}.project"
done

done
}


#Main routine

# This is the main routine.
# First we run setup, then we background several
# tasks: cluster_setup, user_setup_loop, project_setup_loop, qstat_loop.
# NOTE(review): the *_loop workers never terminate and the script exits
# without a 'wait', so the backgrounded jobs keep running until killed.

# Source the Grid Engine environment; requires SGE_ROOT and SGE_CELL
# to be set by the caller.
. ${SGE_ROOT}/${SGE_CELL}/common/settings.sh

# Number of cluster queues to create (first script argument, default 30).
CLUSTER_MAX=${1:-30}

setup

# give qmaster a moment to absorb the setup changes
sleep 5

cluster_setup 1 $CLUSTER_MAX &

user_setup_loop &

project_setup_loop &

qstat_loop &

#cluster_remove 1 $CLUSTER_MAX

Change History (0)

Note: See TracTickets for help on using tickets.