########### https://issues.infn.it/jira/browse/CREAM-154 ###########

[root@cream-31 ~]# cat /etc/redhat-release
Scientific Linux release 6.5 (Carbon)
[root@cream-31 ~]# rpm -qa |egrep -e 'emi-|dynamic'|sort
emi-cream-ce-1.2.2-2.el6.noarch
emi-release-3.0.0-2.el6.noarch
emi-torque-server-1.0.0-2.sl6.noarch
emi-torque-utils-2.0.2-2.el6.noarch
emi-version-3.8.0-1.el6.x86_64
lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch
[root@cream-31 ~]# strings /usr/sbin/maui | egrep '^[0-9]{5,6}$' | head -1
230774
[root@cream-31 ~]# ll /var/spool/maui/maui.key
ls: cannot access /var/spool/maui/maui.key: No such file or directory
[root@cream-31 ~]# echo 230774 >> /var/spool/maui/maui.key
[root@cream-31 ~]# /usr/bin/diagnose -g --keyfile=/var/spool/maui/maui.key
Displaying group information...

Name       Priority  Flags   QDef    QOSList*  PartitionList  Target  Limits

dteam             0  [NONE]  [NONE]  [NONE]    [NONE]           0.00  [NONE]
DEFAULT           0  [NONE]  [NONE]  [NONE]    [NONE]           0.00  [NONE]

[root@cream-31 ~]# ll /etc/lrms/scheduler.conf
-rw-r--r--. 1 root root 608 Jun 27 10:44 /etc/lrms/scheduler.conf
[root@cream-31 ~]# diff /etc/lrms/scheduler.conf /etc/lrms/scheduler.conf.orig
24c24
< vo_max_jobs_cmd: /usr/libexec/vomaxjobs-maui -h cream-31.pd.infn.it -k /var/spool/maui/maui.key
---
> vo_max_jobs_cmd: /usr/libexec/vomaxjobs-maui -h cream-31.pd.infn.it
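Note for reference: the -k/--keyfile option added to vo_max_jobs_cmd above makes the provider pass the MAUI key file on to diagnose. A minimal sketch of that idea follows (Python 2, matching the node's /usr/lib/python2.6 stack); it is not the shipped /usr/libexec/vomaxjobs-maui, and the MAXJOB=<n> parsing and the function name are illustrative assumptions -- the real logic lives in TorqueInfoUtils/MAUIHandler.py.

# Sketch only: roughly what a vomaxjobs-style provider has to do with the
# key file -- run MAUI's diagnose and pull per-group job limits.
import re
import shlex
import subprocess

def group_job_limits(keyfile=None, diagnose="/usr/bin/diagnose"):
    cmd = "%s -g" % diagnose
    if keyfile:
        cmd += " --keyfile=%s" % keyfile      # note the "+=" append
    proc = subprocess.Popen(shlex.split(cmd),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError("diagnose failed: %s" % err.strip())
    limits = {}
    for line in out.splitlines():
        # Assumed format: group name in the first column, a MAXJOB=<n>
        # token in the Limits column when a per-group cap is set.
        match = re.match(r'^(\S+)\s.*\bMAXJOB=(\d+)', line)
        if match:
            limits[match.group(1)] = int(match.group(2))
    return limits

if __name__ == "__main__":
    print(group_job_limits(keyfile="/var/spool/maui/maui.key"))

The 2.4.4 provider reinstalled below via the downgrade appends the keyfile argument with "=+" instead of "+=", which is exactly the CREAM-154 failure reproduced at the end of this section.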
[root@cream-31 ~]# runuser -s /bin/bash -c "/var/lib/bdii/gip/plugin/glite-info-dynamic-scheduler-wrapper" -- ldap
dn: GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest2,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest1,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-cert,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueVOViewLocalID=dteam,GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest2,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueVOViewLocalID=testers.eu-emi.eu,GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest1,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueVOViewLocalID=testers.eu-emi.eu,GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest2,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueVOViewLocalID=dteam,GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-cert,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueVOViewLocalID=cms,GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest1,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueVOViewLocalID=dteam,GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest1,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GlueVOViewLocalID=cms,GlueCEUniqueID=cream-31.pd.infn.it:8443/cream-pbs-creamtest2,mds-vo-name=resource,o=grid
GlueCEStateWaitingJobs: 0
GlueCEStateRunningJobs: 0
GlueCEStateTotalJobs: 0
GlueCEStateEstimatedResponseTime: 0
GlueCEStateWorstResponseTime: 0
GlueCEStateFreeJobSlots: 8

dn: GLUE2ShareID=creamtest1_cms_cream-31.pd.infn.it_ComputingElement,GLUE2ServiceID=cream-31.pd.infn.it_ComputingElement,GLUE2GroupID=resource,o=glue
GLUE2ComputingShareRunningJobs: 0
GLUE2ComputingShareWaitingJobs: 0
GLUE2ComputingShareTotalJobs: 0
GLUE2ComputingShareEstimatedAverageWaitingTime: 0
GLUE2ComputingShareEstimatedWorstWaitingTime: 0
GLUE2ComputingShareFreeSlots: 8

dn: GLUE2ShareID=creamtest1_testers.eu-emi.eu_cream-31.pd.infn.it_ComputingElement,GLUE2ServiceID=cream-31.pd.infn.it_ComputingElement,GLUE2GroupID=resource,o=glue
GLUE2ComputingShareRunningJobs: 0
GLUE2ComputingShareWaitingJobs: 0
GLUE2ComputingShareTotalJobs: 0
GLUE2ComputingShareEstimatedAverageWaitingTime: 0
GLUE2ComputingShareEstimatedWorstWaitingTime: 0
GLUE2ComputingShareFreeSlots: 8

dn: GLUE2ShareID=creamtest2_testers.eu-emi.eu_cream-31.pd.infn.it_ComputingElement,GLUE2ServiceID=cream-31.pd.infn.it_ComputingElement,GLUE2GroupID=resource,o=glue
GLUE2ComputingShareRunningJobs: 0
GLUE2ComputingShareWaitingJobs: 0
GLUE2ComputingShareTotalJobs: 0
GLUE2ComputingShareEstimatedAverageWaitingTime: 0
GLUE2ComputingShareEstimatedWorstWaitingTime: 0
GLUE2ComputingShareFreeSlots: 8

dn: GLUE2ShareID=cert_dteam_cream-31.pd.infn.it_ComputingElement,GLUE2ServiceID=cream-31.pd.infn.it_ComputingElement,GLUE2GroupID=resource,o=glue
GLUE2ComputingShareRunningJobs: 0
GLUE2ComputingShareWaitingJobs: 0
GLUE2ComputingShareTotalJobs: 0
GLUE2ComputingShareEstimatedAverageWaitingTime: 0
GLUE2ComputingShareEstimatedWorstWaitingTime: 0
GLUE2ComputingShareFreeSlots: 8

dn: GLUE2ShareID=creamtest1_dteam_cream-31.pd.infn.it_ComputingElement,GLUE2ServiceID=cream-31.pd.infn.it_ComputingElement,GLUE2GroupID=resource,o=glue
GLUE2ComputingShareRunningJobs: 0
GLUE2ComputingShareWaitingJobs: 0
GLUE2ComputingShareTotalJobs: 0
GLUE2ComputingShareEstimatedAverageWaitingTime: 0
GLUE2ComputingShareEstimatedWorstWaitingTime: 0
GLUE2ComputingShareFreeSlots: 8

dn: GLUE2ShareID=creamtest2_cms_cream-31.pd.infn.it_ComputingElement,GLUE2ServiceID=cream-31.pd.infn.it_ComputingElement,GLUE2GroupID=resource,o=glue
GLUE2ComputingShareRunningJobs: 0
GLUE2ComputingShareWaitingJobs: 0
GLUE2ComputingShareTotalJobs: 0
GLUE2ComputingShareEstimatedAverageWaitingTime: 0
GLUE2ComputingShareEstimatedWorstWaitingTime: 0
GLUE2ComputingShareFreeSlots: 8

dn: GLUE2ShareID=creamtest2_dteam_cream-31.pd.infn.it_ComputingElement,GLUE2ServiceID=cream-31.pd.infn.it_ComputingElement,GLUE2GroupID=resource,o=glue
GLUE2ComputingShareRunningJobs: 0
GLUE2ComputingShareWaitingJobs: 0
GLUE2ComputingShareTotalJobs: 0
GLUE2ComputingShareEstimatedAverageWaitingTime: 0
GLUE2ComputingShareEstimatedWorstWaitingTime: 0
GLUE2ComputingShareFreeSlots: 8

[root@cream-31 ~]# yum downgrade lcg-info-dynamic-scheduler-pbs
Loaded plugins: priorities, protectbase, security
Setting up Downgrade Process
EGI-trustanchors                                         |  951 B     00:00
EMI-3-base                                               | 1.9 kB     00:00
EMI-3-contribs                                           | 1.9 kB     00:00
EMI-3-third-party                                        | 1.9 kB     00:00
EMI-3-updates                                            | 1.9 kB     00:00
IGI-testing-3-base                                       | 2.9 kB     00:00
epel/metalink                                            |  18 kB     00:00
epel                                                     | 4.4 kB     00:00
epel/primary_db                                          | 6.2 MB     00:01
foreman                                                  | 2.9 kB     00:00
foreman-plugins                                          | 2.9 kB     00:00
openstack-icehouse                                       | 2.9 kB     00:00
puppetlabs-deps                                          | 2.5 kB     00:00
puppetlabs-deps/primary_db                               |  23 kB     00:00
puppetlabs-products                                      | 2.5 kB     00:00
puppetlabs-products/primary_db                           | 115 kB     00:00
sl                                                       | 3.6 kB     00:00
sl-security                                              | 3.0 kB     00:00
sl6x                                                     | 3.6 kB     00:00
sl6x-security                                            | 3.0 kB     00:00
520 packages excluded due to repository priority protections
0 packages excluded due to repository protections
Resolving Dependencies
--> Running transaction check
---> Package lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.4-1.el6 will be a downgrade
---> Package lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.5-1.el6 will be erased
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package                          Arch     Version       Repository         Size
================================================================================
Downgrading:
 lcg-info-dynamic-scheduler-pbs   noarch   2.4.4-1.el6   IGI-testing-3-base 22 k

Transaction Summary
================================================================================
Downgrade     1 Package(s)

Total download size: 22 k
Is this ok [y/N]: y
Downloading Packages:
lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch.rpm    |  22 kB     00:00
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
  Installing : lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch            1/2
  Cleanup    : lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch            2/2
  Verifying  : lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch            1/2
  Verifying  : lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch            2/2

Removed:
  lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.5-1.el6

Installed:
  lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.4-1.el6

Complete!
[root@cream-31 ~]# ll /etc/lrms/scheduler.conf
-rw-r--r--. 1 root root 565 Jun 30 10:43 /etc/lrms/scheduler.conf
[root@cream-31 ~]# diff /etc/lrms/scheduler.conf /etc/lrms/scheduler.conf.orig
20,26c20,24
< 
< 
< 
< 
< 
< [WSInterface]
< status-probe : "/usr/libexec/glite-ce-check-submission-state /etc/glite-ce-dbtool/creamdb_min_access.conf"
---
> [LRMS]
> lrms_backend_cmd: /usr/libexec/lrmsinfo-pbs -s cream-31.pd.infn.it
> [Scheduler]
> cycle_time : 0
> vo_max_jobs_cmd: /usr/libexec/vomaxjobs-maui -h cream-31.pd.infn.it
[root@cream-31 ~]# rpm -qf /etc/lrms/scheduler.conf
dynsched-generic-2.5.4-1.el6.noarch
[root@cream-31 ~]# # /opt/glite/yaim/bin/yaim -d 6 -c -s /root/siteinfo/site-info.def -n creamCE -n TORQUE_server -n TORQUE_utils
[root@cream-31 ~]# ll /var/lib/bdii/gip/plugin/glite-info-dynamic-ce
-rwxr-xr-x. 1 ldap ldap 498 Jun 27 10:44 /var/lib/bdii/gip/plugin/glite-info-dynamic-ce
[root@cream-31 ~]# /opt/glite/yaim/bin/yaim -d 6 -c -s /root/siteinfo/site-info.def -n creamCE -n TORQUE_server -n TORQUE_utils 2>&1 | tee /root/conf_emi_creamCE_TORQUE_downgraded_dynamic-scheduler-pbs.`hostname -s`.`date +%Y-%m-%d-%H-%M-%S`
DEBUG: Checking siteinfo dir is not world readable
[…]
Reloading sshd:                                            [  OK  ]
   INFO: Configuration Complete.                           [  OK  ]
   INFO: YAIM terminated succesfully.
[root@cream-31 ~]# /usr/bin/diagnose -g --keyfile=/var/spool/maui/maui.key
Displaying group information...

Name       Priority  Flags   QDef    QOSList*  PartitionList  Target  Limits

dteam             0  [NONE]  [NONE]  [NONE]    [NONE]           0.00  [NONE]
DEFAULT           0  [NONE]  [NONE]  [NONE]    [NONE]           0.00  [NONE]

[root@cream-31 ~]# diff /etc/lrms/scheduler.conf /etc/lrms/scheduler.conf.orig
[root@cream-31 ~]# vi /etc/lrms/scheduler.conf
[root@cream-31 ~]# ll /var/spool/maui/maui.key
-rw-r--r--. 1 root root 7 Jun 30 10:25 /var/spool/maui/maui.key
[root@cream-31 ~]# diff /etc/lrms/scheduler.conf /etc/lrms/scheduler.conf.orig
24c24
< vo_max_jobs_cmd: /usr/libexec/vomaxjobs-maui -h cream-31.pd.infn.it -k /var/spool/maui/maui.key
---
> vo_max_jobs_cmd: /usr/libexec/vomaxjobs-maui -h cream-31.pd.infn.it
[root@cream-31 ~]# runuser -s /bin/bash -c "/var/lib/bdii/gip/plugin/glite-info-dynamic-scheduler-wrapper -vvv" -- ldap
ERROR:lcg-info-dynamic-scheduler:Execution error: VO max jobs backend command returned 3
[root@cream-31 ~]# /usr/libexec/vomaxjobs-maui -h cream-31.pd.infn.it -k /var/spool/maui/maui.key
Traceback (most recent call last):
  File "/usr/libexec/vomaxjobs-maui", line 54, in main
    container = MAUIHandler.parseJobLimit(schedhost, keyarg, infile)
  File "/usr/lib/python2.6/site-packages/TorqueInfoUtils/MAUIHandler.py", line 87, in parseJobLimit
    tmps =+ ' --keyfile=%s' % keyfile
TypeError: bad operand type for unary +: 'str'
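The traceback pins down CREAM-154: in MAUIHandler.parseJobLimit the keyfile option is appended with "=+" instead of "+=", so Python applies unary plus to a string and raises the TypeError above. A minimal sketch of the broken and the corrected statement (only tmps, keyfile and the error text come from the traceback; the surrounding values are assumed):

# Sketch only -- not the full parseJobLimit() from
# /usr/lib/python2.6/site-packages/TorqueInfoUtils/MAUIHandler.py
tmps = '/usr/bin/diagnose -g'             # command assembled so far (assumed)
keyfile = '/var/spool/maui/maui.key'

# Broken (2.4.4): "tmps =+ ..." is parsed as tmps = +('...'), and unary +
# is not defined for str, hence "TypeError: bad operand type for unary +: 'str'"
#   tmps =+ ' --keyfile=%s' % keyfile

# Fixed (2.4.5): append the keyfile option to the command string
tmps += ' --keyfile=%s' % keyfile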

#################### https://issues.infn.it/jira/browse/CREAM-141 ####################

[root@cream-31 ~]# rpm -qa |grep dynamic |sort
lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=grid" objectclass=GLUECEPolicy | grep 999999999
GlueCEPolicyMaxSlotsPerJob: 999999999
GlueCEPolicyMaxWaitingJobs: 999999999
GlueCEPolicyMaxTotalJobs: 999999999
GlueCEPolicyMaxRunningJobs: 999999999
GlueCEPolicyMaxSlotsPerJob: 999999999
GlueCEPolicyMaxWaitingJobs: 999999999
GlueCEPolicyMaxTotalJobs: 999999999
GlueCEPolicyMaxRunningJobs: 999999999
GlueCEPolicyMaxSlotsPerJob: 999999999
GlueCEPolicyMaxWaitingJobs: 999999999
GlueCEPolicyMaxTotalJobs: 999999999
GlueCEPolicyMaxRunningJobs: 999999999
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=grid" objectclass=GLUECEPolicy | grep 444444
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=glue" objectclass=GLUE2ComputingShare | grep 999999999
GLUE2ComputingShareMaxTotalJobs: 999999999
GLUE2ComputingShareMaxWaitingJobs: 999999999
GLUE2ComputingShareMaxRunningJobs: 999999999
GLUE2ComputingShareMaxTotalJobs: 999999999
GLUE2ComputingShareMaxWaitingJobs: 999999999
GLUE2ComputingShareMaxRunningJobs: 999999999
GLUE2ComputingShareMaxTotalJobs: 999999999
GLUE2ComputingShareMaxWaitingJobs: 999999999
GLUE2ComputingShareMaxRunningJobs: 999999999
GLUE2ComputingShareMaxTotalJobs: 999999999
GLUE2ComputingShareMaxWaitingJobs: 999999999
GLUE2ComputingShareMaxRunningJobs: 999999999
GLUE2ComputingShareMaxTotalJobs: 999999999
GLUE2ComputingShareMaxWaitingJobs: 999999999
GLUE2ComputingShareMaxRunningJobs: 999999999
GLUE2ComputingShareMaxTotalJobs: 999999999
GLUE2ComputingShareMaxWaitingJobs: 999999999
GLUE2ComputingShareMaxRunningJobs: 999999999
GLUE2ComputingShareMaxTotalJobs: 999999999
GLUE2ComputingShareMaxWaitingJobs: 999999999
GLUE2ComputingShareMaxRunningJobs: 999999999
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=glue" objectclass=GLUE2ComputingShare | grep 444444
GLUE2ComputingShareMaxMainMemory: 444444
GLUE2ComputingShareMaxSlotsPerJob: 444444
GLUE2ComputingShareMaxVirtualMemory: 444444
GLUE2ComputingShareMaxMainMemory: 444444
GLUE2ComputingShareMaxSlotsPerJob: 444444
GLUE2ComputingShareMaxVirtualMemory: 444444
GLUE2ComputingShareMaxMainMemory: 444444
GLUE2ComputingShareMaxSlotsPerJob: 444444
GLUE2ComputingShareMaxVirtualMemory: 444444
GLUE2ComputingShareMaxMainMemory: 444444
GLUE2ComputingShareMaxSlotsPerJob: 444444
GLUE2ComputingShareMaxVirtualMemory: 444444
GLUE2ComputingShareMaxMainMemory: 444444
GLUE2ComputingShareMaxSlotsPerJob: 444444
GLUE2ComputingShareMaxVirtualMemory: 444444
GLUE2ComputingShareMaxMainMemory: 444444
GLUE2ComputingShareMaxSlotsPerJob: 444444
GLUE2ComputingShareMaxVirtualMemory: 444444
GLUE2ComputingShareMaxMainMemory: 444444
GLUE2ComputingShareMaxSlotsPerJob: 444444
GLUE2ComputingShareMaxVirtualMemory: 444444
[root@cream-31 ~]# /etc/init.d/bdii restart
Stopping BDII update process:                              [  OK  ]
Stopping BDII slapd:                                       [  OK  ]
Starting BDII slapd:                                       [  OK  ]
Starting BDII update process:                              [  OK  ]
[root@cream-31 ~]# grep ERROR /var/log/bdii/bdii-update.log
ERROR:lcg-info-dynamic-scheduler:Execution error: Missing LRMS backend command in configuration
[…]
ERROR:lcg-info-dynamic-scheduler:Execution error: Missing LRMS backend command in configuration
ERROR:lcg-info-dynamic-scheduler:Execution error: VO max jobs backend command returned 3
[…]
ERROR:lcg-info-dynamic-scheduler:Execution error: VO max jobs backend command returned 3
2014-06-30 11:21:31,968: [ERROR] Information provider /var/lib/bdii/gip/plugin/glite-info-dynamic-ce terminated unexpectedly.
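The "Missing LRMS backend command in configuration" entries are consistent with the clobbered /etc/lrms/scheduler.conf seen after the downgrade, where the [LRMS] and [Scheduler] stanzas were replaced by a [WSInterface] section. A hedged sketch of the kind of check that produces this error (Python 2; the section and option names come from the .orig file shown earlier, the error text from the log; the parsing code itself is illustrative, not the provider's real implementation):

# Sketch only: read /etc/lrms/scheduler.conf and fail the way the dynamic
# scheduler does when the LRMS backend command is not configured.
import ConfigParser

def load_scheduler_conf(path="/etc/lrms/scheduler.conf"):
    parser = ConfigParser.ConfigParser()
    parser.read(path)
    if not (parser.has_section("LRMS")
            and parser.has_option("LRMS", "lrms_backend_cmd")):
        # Situation after the downgrade: only a [WSInterface] stanza is left.
        raise RuntimeError("Missing LRMS backend command in configuration")
    # The healthy .orig layout is assumed for the [Scheduler] options.
    return {
        "lrms_backend_cmd": parser.get("LRMS", "lrms_backend_cmd"),
        "vo_max_jobs_cmd": parser.get("Scheduler", "vo_max_jobs_cmd"),
        "cycle_time": parser.getint("Scheduler", "cycle_time"),
    }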
[root@cream-31 ~]# yum update lcg-info-dynamic-scheduler-pbs
Loaded plugins: priorities, protectbase, security
520 packages excluded due to repository priority protections
0 packages excluded due to repository protections
Setting up Update Process
Resolving Dependencies
--> Running transaction check
---> Package lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.4-1.el6 will be updated
---> Package lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.5-1.el6 will be an update
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package                          Arch     Version       Repository         Size
================================================================================
Updating:
 lcg-info-dynamic-scheduler-pbs   noarch   2.4.5-1.el6   IGI-testing-3-base 22 k

Transaction Summary
================================================================================
Upgrade       1 Package(s)

Total download size: 22 k
Is this ok [y/N]: y
Downloading Packages:
lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch.rpm    |  22 kB     00:00
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
  Updating   : lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch            1/2
  Cleanup    : lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch            2/2
  Verifying  : lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch            1/2
  Verifying  : lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch            2/2

Updated:
  lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.5-1.el6

[root@cream-31 ~]# grep ERROR /var/log/bdii/bdii-update.log
ERROR:lcg-info-dynamic-scheduler:Execution error: Missing LRMS backend command in configuration
[…]
ERROR:lcg-info-dynamic-scheduler:Execution error: Missing LRMS backend command in configuration
ERROR:lcg-info-dynamic-scheduler:Execution error: VO max jobs backend command returned 3
[…]
ERROR:lcg-info-dynamic-scheduler:Execution error: VO max jobs backend command returned 3
2014-06-30 11:21:31,968: [ERROR] Information provider /var/lib/bdii/gip/plugin/glite-info-dynamic-ce terminated unexpectedly.
ERROR:lcg-info-dynamic-scheduler:Execution error: Missing LRMS backend command in configuration
[root@cream-31 ~]# /opt/glite/yaim/bin/yaim -d 6 -c -s /root/siteinfo/site-info.def -n creamCE -n TORQUE_server -n TORQUE_utils 2>&1 | tee /root/conf_emi_creamCE_TORQUE_upgraded_dynamic-scheduler-pbs.`hostname -s`.`date +%Y-%m-%d-%H-%M-%S`
DEBUG: Checking siteinfo dir is not world readable
[…]
Reloading sshd:                                            [  OK  ]
   INFO: Configuration Complete.                           [  OK  ]
   INFO: YAIM terminated succesfully.
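With 2.4.5 back in place and YAIM rerun, the four ldapsearch checks repeated below should come back empty. A small hedged helper that scripts the same verification (Python 2; host, port, filters and the placeholder values are taken from the queries in this log; the function name is an assumption):

# Sketch only: query the resource BDII and report any GLUE1/GLUE2 attribute
# still publishing the placeholder values 999999999 or 444444.
import subprocess

QUERIES = [
    ("o=grid", "objectclass=GLUECEPolicy"),
    ("o=glue", "objectclass=GLUE2ComputingShare"),
]
PLACEHOLDERS = ("999999999", "444444")

def placeholders_published(host="cream-31.pd.infn.it", port=2170):
    found = []
    for base, filt in QUERIES:
        cmd = ["ldapsearch", "-h", host, "-x", "-p", str(port), "-b", base, filt]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        out = proc.communicate()[0]
        for line in out.splitlines():
            if any(marker in line for marker in PLACEHOLDERS):
                found.append(line.strip())
    return found

if __name__ == "__main__":
    leftovers = placeholders_published()
    print(leftovers if leftovers else "no placeholder limits published")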
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=grid" objectclass=GLUECEPolicy | grep 999999999
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=grid" objectclass=GLUECEPolicy | grep 444444
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=glue" objectclass=GLUE2ComputingShare | grep 999999999
[root@cream-31 ~]# ldapsearch -h cream-31.pd.infn.it -x -p 2170 -b "o=glue" objectclass=GLUE2ComputingShare | grep 444444

################### https://issues.infn.it/jira/browse/CREAM-139 ###################

[root@cream-31 ~]# rpm -qa |grep dynamic |sort
lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch
[root@cream-31 ~]# ll /var/lib/bdii/gip/plugin/glite-info-dynamic-ce
-rwxr-xr-x. 1 ldap ldap 498 Jun 30 10:52 /var/lib/bdii/gip/plugin/glite-info-dynamic-ce
[root@cream-31 ~]# yum update lcg-info-dynamic-scheduler-pbs
Loaded plugins: priorities, protectbase, security
520 packages excluded due to repository priority protections
0 packages excluded due to repository protections
Setting up Update Process
Resolving Dependencies
--> Running transaction check
---> Package lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.4-1.el6 will be updated
---> Package lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.5-1.el6 will be an update
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package                          Arch     Version       Repository         Size
================================================================================
Updating:
 lcg-info-dynamic-scheduler-pbs   noarch   2.4.5-1.el6   IGI-testing-3-base 22 k

Transaction Summary
================================================================================
Upgrade       1 Package(s)

Total download size: 22 k
Is this ok [y/N]: y
Downloading Packages:
lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch.rpm    |  22 kB     00:00
Running rpm_check_debug
Running Transaction Test
Transaction Test Succeeded
Running Transaction
  Updating   : lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch            1/2
  Cleanup    : lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch            2/2
  Verifying  : lcg-info-dynamic-scheduler-pbs-2.4.5-1.el6.noarch            1/2
  Verifying  : lcg-info-dynamic-scheduler-pbs-2.4.4-1.el6.noarch            2/2

Updated:
  lcg-info-dynamic-scheduler-pbs.noarch 0:2.4.5-1.el6

Complete!
[root@cream-31 ~]# ll /var/lib/bdii/gip/plugin/glite-info-dynamic-ce
ls: cannot access /var/lib/bdii/gip/plugin/glite-info-dynamic-ce: No such file or directory
[root@cream-31 ~]# /opt/glite/yaim/bin/yaim -d 6 -c -s /root/siteinfo/site-info.def -n creamCE -n TORQUE_server -n TORQUE_utils 2>&1 | tee /root/conf_emi_creamCE_TORQUE_upgraded_dynamic-scheduler-pbs.`hostname -s`.`date +%Y-%m-%d-%H-%M-%S`
DEBUG: Checking siteinfo dir is not world readable
[…]
Reloading sshd:                                            [  OK  ]
   INFO: Configuration Complete.                           [  OK  ]
   INFO: YAIM terminated succesfully.
[root@cream-31 ~]# ll /var/lib/bdii/gip/plugin/glite-info-dynamic-ce
-rwxr-xr-x. 1 root root 498 Jun 30 11:30 /var/lib/bdii/gip/plugin/glite-info-dynamic-ce
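For CREAM-139 the point is that /var/lib/bdii/gip/plugin/glite-info-dynamic-ce disappears during the package update and only comes back after YAIM is rerun (note it also returns owned by root rather than ldap). A hedged post-update sanity check along these lines (Python 2; the path and the expected owner are taken from the listings above, everything else is illustrative and not part of any shipped tool):

# Sketch only: after an rpm transaction, make sure the GIP plugin is still
# there and executable, and warn if the owner differs from the pre-update one.
import os
import pwd

PLUGIN = "/var/lib/bdii/gip/plugin/glite-info-dynamic-ce"

def check_plugin(path=PLUGIN, expected_owner="ldap"):
    if not os.path.exists(path):
        raise RuntimeError("%s is missing: rerun YAIM or reinstall the provider" % path)
    if not os.access(path, os.X_OK):
        raise RuntimeError("%s is not executable" % path)
    owner = pwd.getpwuid(os.stat(path).st_uid).pw_name
    if owner != expected_owner:
        print("warning: %s is owned by %s, expected %s" % (path, owner, expected_owner))

if __name__ == "__main__":
    check_plugin()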

###### Rebuild the package increasing the version number (2.4.6-1) and install the new package.
##### http://ci-01.cnaf.infn.it:8080/job/cream-info-dynamic-scheduler-pbs/58/console
[…]
Fetching upstream changes from git://github.com/italiangrid/info-dynamic-scheduler-pbs
 > git fetch --tags --progress git://github.com/italiangrid/info-dynamic-scheduler-pbs +refs/heads/*:refs/remotes/origin/*
 > git rev-parse origin/2.4.6-1^{commit}
 > git rev-parse 2.4.6-1^{commit}
ERROR: Couldn't find any revision to build. Verify the repository and branch configuration for this job.
Notifying upstream projects of job completion
Finished: FAILURE

###### The Jenkins build fails because neither a 2.4.6-1 branch nor a 2.4.6-1 tag exists yet in the info-dynamic-scheduler-pbs repository, so git rev-parse cannot resolve a revision to build.