Report generated on 15-Mar-2017 at 04:45:53 by pytest-html v1.14.2

Environment

389-ds-base 1.3.6.2-20170315gitf786639.el7
DS build 1.3.6.2
Packages {'py': '1.4.32', 'pytest': '3.0.7', 'pluggy': '0.4.0'}
Platform Linux-3.10.0-595.el7.x86_64-x86_64-with-redhat-7.4-Maipo
Plugins {'beakerlib': '0.7', 'html': '1.14.2', 'cov': '2.4.0', 'metadata': '1.3.0'}
Python 2.7.5
nspr 4.13.1-1.0.el7
nss 3.28.3-3.el7
openldap 2.4.44-1.el7
svrcore 4.1.2-1.el7

Summary

378 tests ran in 1779.43 seconds.

283 passed, 0 skipped, 95 failed, 140 errors, 0 expected failures, 0 unexpected passes

Results

Result Test Duration Links
Error suites/acl/acl_test.py::test_aci_attr_subtype_targetattr[lang-ja]::setup 11.36
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

    @pytest.fixture(scope="module")
    def topology_m2(request):
        """Create Replication Deployment with two masters"""

        # Creating master 1...
        if DEBUGGING:
            master1 = DirSrv(verbose=True)
        else:
            master1 = DirSrv(verbose=False)
        args_instance[SER_HOST] = HOST_MASTER_1
        args_instance[SER_PORT] = PORT_MASTER_1
        args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
        args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
        args_master = args_instance.copy()
        master1.allocate(args_master)
        instance_master1 = master1.exists()
        if instance_master1:
            master1.delete()
        master1.create()
        master1.open()
        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                          replicaId=REPLICAID_MASTER_1)

        # Creating master 2...
        if DEBUGGING:
            master2 = DirSrv(verbose=True)
        else:
            master2 = DirSrv(verbose=False)
        args_instance[SER_HOST] = HOST_MASTER_2
        args_instance[SER_PORT] = PORT_MASTER_2
        args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
        args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
        args_master = args_instance.copy()
        master2.allocate(args_master)
        instance_master2 = master2.exists()
        if instance_master2:
            master2.delete()
        master2.create()
        master2.open()
        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                          replicaId=REPLICAID_MASTER_2)

        def fin():
            if DEBUGGING:
                master1.stop()
                master2.stop()
            else:
                master1.delete()
                master2.delete()

        request.addfinalizer(fin)

        # Create all the agreements
        # Creating agreement from master 1 to master 2
        properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
                                              port=master2.port, properties=properties)
        if not m1_m2_agmt:
            log.fatal("Fail to create a master -> master replica agreement")
            sys.exit(1)
        log.debug("{} created".format(m1_m2_agmt))

        # Creating agreement from master 2 to master 1
        properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
                                              port=master1.port, properties=properties)
        if not m2_m1_agmt:
            log.fatal("Fail to create a master -> master replica agreement")
            sys.exit(1)
        log.debug("{} created".format(m2_m1_agmt))

        # Allow the replicas to get situated with the new agreements...
        time.sleep(5)

        # Initialize all the agreements
>       master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

    def _ldap_call(self,func,*args,**kwargs):
      """
      Wrapper method mainly for serializing calls into OpenLDAP libs
      and trace logs
      """
      self._ldap_object_lock.acquire()
      if __debug__:
        if self._trace_level>=1:
          self._trace_file.write('*** %s %s - %s\n%s\n' % (
            repr(self),
            self._uri,
            '.'.join((self.__class__.__name__,func.__name__)),
            pprint.pformat((args,kwargs))
          ))
          if self._trace_level>=9:
            traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
      diagnostic_message_success = None
      try:
        try:
>         result = func(*args,**kwargs)
E         SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}Jpy5vQTLuGz18WkfRH977FWxxSGRshwxf7ISiIrd7uipwVr8UyNvmrlOLy4/hxn4tuIEyFaI2rjTNeHeKrc8EgOE7OByXauf
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}9ytBcbLc+seHmYx1u0CtKNwPR6bMfOylh6xBNrccg9jtkmhrI15l83LCP8rFSBTrFfsbOsTYrv7H8kihwsgDzms9YGbMRVkY
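
Each of the setup errors below shares the failure shown above: during topology_m2 setup, master1.agreement.init triggers an LDAP search (replica.list -> search_s) and python-ldap raises SERVER_DOWN, i.e. the newly created master instance is not answering on its LDAP port. A minimal sketch of a connectivity probe that surfaces the same exception is given here; it is not part of the test run, and the host and port values are illustrative placeholders.

    # Probe an LDAP instance the way the traceback above fails:
    # python-ldap raises ldap.SERVER_DOWN whenever no listener answers
    # on the target host:port. Host and port defaults are placeholders.
    import ldap

    def instance_reachable(host='localhost', port=389):
        """Return True if an LDAP listener answers on host:port."""
        conn = ldap.initialize('ldap://{}:{}'.format(host, port))
        conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)  # fail fast instead of hanging
        try:
            conn.simple_bind_s()  # anonymous bind; raises SERVER_DOWN if unreachable
        except ldap.SERVER_DOWN as err:
            print('cannot contact {}:{} - {}'.format(host, port, err))
            return False
        conn.unbind_s()
        return True

Probing HOST_MASTER_1/PORT_MASTER_1 and HOST_MASTER_2/PORT_MASTER_2 this way before agreement.init would show whether the instances failed to start at all or went down during replication setup.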
Error suites/acl/acl_test.py::test_aci_attr_subtype_targetattr[binary]::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_aci_attr_subtype_targetattr[phonetic]::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_mode_default_add_deny::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_mode_default_delete_deny::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_moddn_staging_prod[0-cn=staged user,dc=example,dc=com-cn=accounts,dc=example,dc=com-False]::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_moddn_staging_prod[1-cn=staged user,dc=example,dc=com-cn=accounts,dc=example,dc=com-False]::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_moddn_staging_prod[2-cn=staged user,dc=example,dc=com-cn=bad*,dc=example,dc=com-True]::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_moddn_staging_prod[3-cn=st*,dc=example,dc=com-cn=accounts,dc=example,dc=com-False]::setup 0.00
(setup error identical to the first entry above: module-scoped topology_m2 fixture failed with SERVER_DOWN: {'desc': "Can't contact LDAP server"})
Error suites/acl/acl_test.py::test_moddn_staging_prod[4-cn=bad*,dc=example,dc=com-cn=accounts,dc=example,dc=com-True]::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_moddn_staging_prod[5-cn=st*,dc=example,dc=com-cn=ac*,dc=example,dc=com-False]::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_moddn_staging_prod[6-None-cn=ac*,dc=example,dc=com-False]::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_moddn_staging_prod[7-cn=st*,dc=example,dc=com-None-False]::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_moddn_staging_prod[8-None-None-False]::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_moddn_staging_prod_9::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_moddn_prod_staging::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_check_repl_M2_to_M1::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_moddn_staging_prod_except::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_mode_default_ger_no_moddn::setup 0.00
SERVER_DOWN: {'desc': "Can't contact LDAP server"} raised while the topology_m2 fixture initialized the replication agreements; traceback identical to the first setup error above.
Error suites/acl/acl_test.py::test_mode_default_ger_with_moddn::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
port=master1.port, properties=properties)
if not m2_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m2_m1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
> master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/acl/acl_test.py::test_mode_switch_default_to_legacy::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
port=master1.port, properties=properties)
if not m2_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m2_m1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
> master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/acl/acl_test.py::test_mode_legacy_ger_no_moddn1::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
port=master1.port, properties=properties)
if not m2_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m2_m1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
> master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/acl/acl_test.py::test_mode_legacy_ger_no_moddn2::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
port=master1.port, properties=properties)
if not m2_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m2_m1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
> master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/acl/acl_test.py::test_mode_legacy_ger_with_moddn::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
port=master1.port, properties=properties)
if not m2_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m2_m1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
> master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/acl/acl_test.py::test_rdn_write_get_ger::setup 0.01
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
port=master1.port, properties=properties)
if not m2_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m2_m1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
> master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/acl/acl_test.py::test_rdn_write_modrdn_anonymous::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_aci_attr_subtype_targetattr[lang-ja]'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
port=master1.port, properties=properties)
if not m2_m1_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m2_m1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
> master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:486:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:617: in init
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x2964250>
func = <built-in method result4 of LDAP object at 0x31d2300>
args = (14, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/config/config_test.py::test_maxbersize_repl::setup 5.69
request = <SubRequest 'topology_m2' for <Function 'test_maxbersize_repl'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x604d110>
func = <built-in method result4 of LDAP object at 0x31d2800>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}/3ObEoL26N1A8xZ2wbKojMdSxvb+po0Rg0T1QaaCPeslxl9iFo0Vu5eQuQGBrueXOe2vH5z0oqey00wkG3f/5ENk/HQIKkT5
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}C3x97mouSgofrsUMn7CN5eVQOpGN0kpmRQNbVeCQTmdEVT1og+BJ3kcjgwQZoIWJniUsd7e288EQ+OHd7lKbHB9W5sINxsqD
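Note: the config and gssapi_repl suite errors fail one step earlier than the acl suite errors: inside master2.agreement.create() (topologies.py:476, agreement.py:454) rather than master1.agreement.init() (topologies.py:486). The underlying exception is the same SERVER_DOWN from python-ldap. If a clearer setup message is wanted than a raw traceback, the fixture could wrap the call it already makes in an explicit guard; the sketch below reuses only names visible in the traceback (the master instance, SUFFIX, HOST_MASTER_2, PORT_MASTER_2) and the wrapper itself is hypothetical, not shipped lib389 code:

    import ldap
    import pytest

    def init_agreement_or_fail(master, suffix, host, port):
        """Run agreement.init(), converting a lost LDAP connection into a
        readable pytest setup failure instead of a raw SERVER_DOWN traceback."""
        try:
            master.agreement.init(suffix, host, port)
        except ldap.SERVER_DOWN as exc:
            pytest.fail("{}:{} unreachable while initializing the replication "
                        "agreement: {}".format(host, port, exc))

Used in topology_m2 as init_agreement_or_fail(master1, SUFFIX, HOST_MASTER_2, PORT_MASTER_2), this would mark the fixture as failed with a one-line reason while leaving the per-test error accounting unchanged.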
Error suites/config/config_test.py::test_config_listen_backport_size::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_maxbersize_repl'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x604d110>
func = <built-in method result4 of LDAP object at 0x31d2800>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/config/config_test.py::test_config_deadlock_policy::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_maxbersize_repl'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x604d110>
func = <built-in method result4 of LDAP object at 0x31d2800>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
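Note on the failure above: SERVER_DOWN is raised while listing replicas on master2 for the second agreement, i.e. the instance stopped answering somewhere between enableReplication and agreement.create. A minimal reachability probe of the following kind (a sketch only; assert_reachable is a hypothetical helper, not part of lib389 or this fixture) could be run against each master before agreement.create so the fixture fails with the unreachable instance named explicitly:

import ldap

def assert_reachable(conn, label):
    """Probe the root DSE and raise a descriptive error if the instance is down."""
    try:
        # A base-scope search of the root DSE is cheap and succeeds on any
        # running Directory Server instance the connection is bound to.
        conn.search_s("", ldap.SCOPE_BASE, "(objectClass=*)", ["vendorVersion"])
    except ldap.SERVER_DOWN:
        raise RuntimeError("{} is not reachable; check the instance before "
                           "creating replication agreements".format(label))

# e.g. before the master2 -> master1 agreement:
# assert_reachable(master2, "master2 {}:{}".format(master2.host, master2.port))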
Error suites/gssapi_repl/gssapi_repl_test.py::test_gssapi_repl::setup 5.70
request = <SubRequest 'topology_m2' for <Function 'test_gssapi_repl'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x5a846d0>
func = <built-in method result4 of LDAP object at 0x29431e8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}cgZwnjz5JYW2kcPNIaxVeKUvH9BkdIlthWrVd/rOwqCyPjWCwy4KdHIaKviNXm33s4ZRITNVb/un5L3UbFLbM3s3FGtTwvYu
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}nBjb5YJbauHCa0tdydLCxDbaXxhz0EkoWuHRSwTkHrI7YhUiDeRsxwspJv3aS76pzEHCzRQtV05j4dELUNkE7mZrlkF7v/aj
Error suites/paged_results/paged_results_test.py::test_multi_suffix_search::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>

@pytest.fixture(scope="module")
def new_suffixes(topology_st):
"""Add two suffixes with backends, one is a parent
of the another
"""

log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_1, NEW_BACKEND_1))
topology_st.standalone.backend.create(NEW_SUFFIX_1,
> {BACKEND_NAME: NEW_BACKEND_1})

suites/paged_results/paged_results_test.py:75:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/backend.py:266: in create
ents = self.conn.backend.list(suffix=suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/backend.py:88: in list
ents = self.conn.search_s(base, scope, filt)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:590: in search_ext_s
msgid = self.search_ext(base,scope,filterstr,attrlist,attrsonly,serverctrls,clientctrls,timeout,sizelimit)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:586: in search_ext
timeout,sizelimit,
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x614f610>
func = <built-in method search_ext of LDAP object at 0x2b43328>
args = ('cn=plugins,cn=config', 2, '(&(objectclass=nsBackendInstance)(|(nsslapd-suffix=o=test_parent)(nsslapd-suffix=o=test_parent)))', None, 0, None, ...)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding suffix:o=test_parent and backend: parent_base
INFO:lib389:List backend with suffix=o=test_parent
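The new_suffixes fixture above dies inside backend.create because the standalone instance is already unreachable, so every parametrized paged-results case then reports the same setup error. A sketch of how the fixture could fail fast with one readable message (same names as above; only the except clause is new):

import ldap
import pytest

@pytest.fixture(scope="module")
def new_suffixes(topology_st):
    """Add two suffixes with backends, one being the parent of the other."""
    log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_1, NEW_BACKEND_1))
    try:
        topology_st.standalone.backend.create(NEW_SUFFIX_1,
                                              {BACKEND_NAME: NEW_BACKEND_1})
    except ldap.SERVER_DOWN as e:
        # Turn the low-level connection error into a single clear
        # module-level failure instead of repeating it per test.
        pytest.fail("standalone instance is unreachable, cannot create "
                    "backend {}: {}".format(NEW_BACKEND_1, e))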
Error suites/paged_results/paged_results_test.py::test_maxsimplepaged_per_conn_failure[1]::teardown 0.29
def fin():
log.info('Deleting user {}'.format(TEST_USER_DN))
> topology_st.standalone.delete_s(TEST_USER_DN)

suites/paged_results/paged_results_test.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:333: in delete_s
return self.delete_ext_s(dn,None,None)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:325: in delete_ext_s
msgid = self.delete_ext(dn,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:322: in delete_ext
return self._ldap_call(self._l.delete_ext,dn,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x614f610>
func = <built-in method delete_ext of LDAP object at 0x2b43328>
args = ('uid=simplepaged_test,dc=example,dc=com', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 20 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00248,dc=example,dc=com): error (Can't contact LDAP server)
--------------------------- Captured stdout teardown ---------------------------
Instance slapd-standalone_1 removed.
--------------------------- Captured stderr teardown ---------------------------
INFO:tests.suites.paged_results.paged_results_test:Deleting user uid=simplepaged_test,dc=example,dc=com
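The teardown error above stacks on top of the earlier call failure: the instance has already been removed ("Instance slapd-standalone_1 removed.") by the time fin() calls delete_s, so the cleanup can only raise SERVER_DOWN. A defensive finalizer along these lines (a sketch reusing the fixture's own names) keeps cleanup noise out of the report without hiding the original failure:

import ldap

def fin():
    log.info('Deleting user {}'.format(TEST_USER_DN))
    try:
        topology_st.standalone.delete_s(TEST_USER_DN)
    except (ldap.SERVER_DOWN, ldap.NO_SUCH_OBJECT) as e:
        # The instance may already be stopped or removed by an earlier
        # failure; log and continue instead of raising a second error
        # during teardown.
        log.warning('Skipping cleanup of {}: {}'.format(TEST_USER_DN, e))

request.addfinalizer(fin)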
Error suites/password/pwdPolicy_attribute_test.py::test_change_pwd[on-off-UNWILLING_TO_PERFORM]::setup 3.01
topology_st = <lib389.topologies.TopologyMain object at 0x65aaa10>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set up password policy for subtree and user"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PW_POLICY_CONT_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_attribute_test.py:91: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_attribute_test:Adding user uid=simplepaged_test,ou=people,dc=example,dc=com
INFO:tests.suites.password.pwdPolicy_attribute_test:Enable fine-grained policy
INFO:tests.suites.password.pwdPolicy_attribute_test:Create password policy for subtree ou=people,dc=example,dc=com
ldap_result: Can't contact LDAP server (-1)
ldapmodify: update failed: cn=cn\=nsPwPolicyEntry\,ou\=people\,dc\=example\,dc\=com,cn=nsPwPolicyContainer,ou=people,dc=example,dc=com
ldap_add: Can't contact LDAP server (-1)
ldapmodify: update failed: cn=cn\=nsPwTemplateEntry\,ou\=people\,dc\=example\,dc\=com,cn=nsPwPolicyContainer,ou=people,dc=example,dc=com
ldap_add: Can't contact LDAP server (-1)
ldapmodify: update failed: cn=nsPwPolicy_cos,ou=people,dc=example,dc=com
ldap_add: Can't contact LDAP server (-1)
Error 255 while adding pwpolicy entries. Exiting.
INFO:tests.suites.password.pwdPolicy_attribute_test:Add pwdpolicysubentry attribute to ou=people,dc=example,dc=com
ERROR:tests.suites.password.pwdPolicy_attribute_test:Failed to pwdpolicysubentry pw policy policy for ou=people,dc=example,dc=com: error Can't contact LDAP server
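In the setup above, ns-newpwpolicy.pl clearly failed ("Error 255 while adding pwpolicy entries. Exiting."), yet the fixture did not notice: subprocess.call only returns the exit status and never raises CalledProcessError, so that except branch is dead code and the failure only surfaces later as SERVER_DOWN on the modify. A sketch of the same step using subprocess.check_call, which does raise on a non-zero exit status (note CalledProcessError has no .message['desc']; its useful field is .returncode):

import subprocess

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
    # check_call raises CalledProcessError on a non-zero exit status, so a
    # failed ns-newpwpolicy.pl run stops the fixture here with the real cause.
    subprocess.check_call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
                           '-D', DN_DM, '-w', PASSWORD,
                           '-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
                           '-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
    log.error('ns-newpwpolicy.pl failed for {} with exit status {}'.format(
        OU_PEOPLE, e.returncode))
    raise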
Error suites/password/pwdPolicy_attribute_test.py::test_change_pwd[off-off-UNWILLING_TO_PERFORM]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x65aaa10>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set up password policy for subtree and user"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PW_POLICY_CONT_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_attribute_test.py:91: SERVER_DOWN
Error suites/password/pwdPolicy_attribute_test.py::test_change_pwd[off-on-None]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x65aaa10>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set up password policy for subtree and user"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PW_POLICY_CONT_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_attribute_test.py:91: SERVER_DOWN
Error suites/password/pwdPolicy_attribute_test.py::test_change_pwd[on-on-None]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x65aaa10>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set up password policy for subtree and user"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PW_POLICY_CONT_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_attribute_test.py:91: SERVER_DOWN
Error suites/password/pwdPolicy_attribute_test.py::test_pwd_min_age::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x65aaa10>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set up password policy for subtree and user"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PW_POLICY_CONT_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_attribute_test.py:91: SERVER_DOWN
Error suites/password/pwdPolicy_attribute_test.py::test_pwd_min_age::teardown 0.30
def fin():
log.info('Deleting user {}'.format(TEST_USER_DN))
> topology_st.standalone.delete_s(TEST_USER_DN)

suites/password/pwdPolicy_attribute_test.py:52:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:333: in delete_s
return self.delete_ext_s(dn,None,None)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:325: in delete_ext_s
msgid = self.delete_ext(dn,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:322: in delete_ext
return self._ldap_call(self._l.delete_ext,dn,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x65aac50>
func = <built-in method delete_ext of LDAP object at 0x1da9508>
args = ('uid=simplepaged_test,ou=people,dc=example,dc=com', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
--------------------------- Captured stdout teardown ---------------------------
Instance slapd-standalone_1 removed.
--------------------------- Captured stderr teardown ---------------------------
INFO:tests.suites.password.pwdPolicy_attribute_test:Deleting user uid=simplepaged_test,ou=people,dc=example,dc=com
Error suites/password/pwdPolicy_inherit_global_test.py::test_entry_has_no_restrictions[off-off]::setup 2.90
topology_st = <lib389.topologies.TopologyMain object at 0x67e1e50>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set global password policy.
Then, set fine-grained subtree level password policy
to ou=People with no password syntax.

Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default
"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PWP_CONTAINER_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_inherit_global_test.py:107: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_inherit_global_test:Adding user uid=buser,dc=example,dc=com
INFO:tests.suites.password.pwdPolicy_inherit_global_test:Adding an aci for the bind user
INFO:tests.suites.password.pwdPolicy_inherit_global_test:Enable fine-grained policy
INFO:tests.suites.password.pwdPolicy_inherit_global_test:Create password policy for subtree ou=People,dc=example,dc=com
ldap_result: Can't contact LDAP server (-1)
ldapmodify: update failed: cn=cn\=nsPwPolicyEntry\,ou\=People\,dc\=example\,dc\=com,cn=nsPwPolicyContainer,ou=People,dc=example,dc=com
ldap_add: Can't contact LDAP server (-1)
ldapmodify: update failed: cn=cn\=nsPwTemplateEntry\,ou\=People\,dc\=example\,dc\=com,cn=nsPwPolicyContainer,ou=People,dc=example,dc=com
ldap_add: Can't contact LDAP server (-1)
ldapmodify: update failed: cn=nsPwPolicy_cos,ou=People,dc=example,dc=com
ldap_add: Can't contact LDAP server (-1)
Error 255 while adding pwpolicy entries. Exiting.
INFO:tests.suites.password.pwdPolicy_inherit_global_test:Add pwdpolicysubentry attribute to ou=People,dc=example,dc=com
ERROR:tests.suites.password.pwdPolicy_inherit_global_test:Failed to pwdpolicysubentry pw policy policy for ou=People,dc=example,dc=com: error Can't contact LDAP server
Error suites/password/pwdPolicy_inherit_global_test.py::test_entry_has_no_restrictions[on-off]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67e1e50>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set global password policy.
Then, set fine-grained subtree level password policy
to ou=People with no password syntax.

Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default
"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PWP_CONTAINER_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_inherit_global_test.py:107: SERVER_DOWN
Error suites/password/pwdPolicy_inherit_global_test.py::test_entry_has_no_restrictions[off-on]::setup 0.01
topology_st = <lib389.topologies.TopologyMain object at 0x67e1e50>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set global password policy.
Then, set fine-grained subtree level password policy
to ou=People with no password syntax.

Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default
"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PWP_CONTAINER_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_inherit_global_test.py:107: SERVER_DOWN
Error suites/password/pwdPolicy_inherit_global_test.py::test_entry_has_restrictions[cn=config]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67e1e50>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set global password policy.
Then, set fine-grained subtree level password policy
to ou=People with no password syntax.

Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default
"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PWP_CONTAINER_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_inherit_global_test.py:107: SERVER_DOWN
Error suites/password/pwdPolicy_inherit_global_test.py::test_entry_has_restrictions[cn="cn=nsPwPolicyEntry,ou=People,dc=example,dc=com",cn=nsPwPolicyContainer,ou=People,dc=example,dc=com]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67e1e50>
test_user = None

@pytest.fixture(scope="module")
def password_policy(topology_st, test_user):
"""Set global password policy.
Then, set fine-grained subtree level password policy
to ou=People with no password syntax.

Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default
"""

log.info('Enable fine-grained policy')
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
'nsslapd-pwpolicy-local',
'on')])
except ldap.LDAPError as e:
log.error('Failed to set fine-grained policy: error {}'.format(
e.message['desc']))
raise e

log.info('Create password policy for subtree {}'.format(OU_PEOPLE))
try:
subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(),
'-D', DN_DM, '-w', PASSWORD,
'-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE,
'-S', OU_PEOPLE, '-Z', SERVERID_STANDALONE])
except subprocess.CalledProcessError as e:
log.error('Failed to create pw policy policy for {}: error {}'.format(
OU_PEOPLE, e.message['desc']))
raise e

log.info('Add pwdpolicysubentry attribute to {}'.format(OU_PEOPLE))
try:
topology_st.standalone.modify_s(OU_PEOPLE, [(ldap.MOD_REPLACE,
'pwdpolicysubentry',
PWP_CONTAINER_PEOPLE)])
except ldap.LDAPError as e:
log.error('Failed to pwdpolicysubentry pw policy ' \
'policy for {}: error {}'.format(OU_PEOPLE,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_inherit_global_test.py:107: SERVER_DOWN
Error suites/password/pwdPolicy_inherit_global_test.py::test_entry_has_restrictions[cn="cn=nsPwPolicyEntry,ou=People,dc=example,dc=com",cn=nsPwPolicyContainer,ou=People,dc=example,dc=com]::teardown 0.29
def fin():
log.info('Deleting user {}'.format(BN))
> topology_st.standalone.delete_s(BN)

suites/password/pwdPolicy_inherit_global_test.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:333: in delete_s
return self.delete_ext_s(dn,None,None)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:325: in delete_ext_s
msgid = self.delete_ext(dn,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:322: in delete_ext
return self._ldap_call(self._l.delete_ext,dn,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x67e1690>
func = <built-in method delete_ext of LDAP object at 0x29427d8>
args = ('uid=buser,dc=example,dc=com', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
--------------------------- Captured stdout teardown ---------------------------
Instance slapd-standalone_1 removed.
--------------------------- Captured stderr teardown ---------------------------
INFO:tests.suites.password.pwdPolicy_inherit_global_test:Deleting user uid=buser,dc=example,dc=com
Error suites/password/pwdPolicy_warning_test.py::test_expiry_time::teardown 0.00
def fin():
"""Removes the user entry"""

log.info('Remove the user entry')
try:
topology_st.standalone.delete_s(USER_DN)
except ldap.LDAPError as ex:
log.error("Failed to remove user, error:{:s}" \
.format(ex.message['desc']))
> raise ex
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_warning_test.py:147: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default values
INFO:tests.suites.password.pwdPolicy_warning_test:Set the new values
INFO:tests.suites.password.pwdPolicy_warning_test:Add the user
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the password expiry warning time
INFO:tests.suites.password.pwdPolicy_warning_test:Binding with (uid=tuser,dc=example,dc=com) and requesting the password expiry warning time
INFO:tests.suites.password.pwdPolicy_warning_test:Bind with the user and request the password expiry warning time
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to get password expiry warning time, error:Can't contact LDAP server
INFO:tests.suites.password.pwdPolicy_warning_test:Rebinding as DM
--------------------------- Captured stderr teardown ---------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Remove the user entry
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to remove user, error:Can't contact LDAP server
INFO:tests.suites.password.pwdPolicy_warning_test:Reset the defaults
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set defaults, error:Can't contact LDAP server
Error suites/password/pwdPolicy_warning_test.py::test_password_warning[passwordSendExpiringTime-off]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67d5f10>
request = <SubRequest 'global_policy' for <Function 'test_password_warning[passwordSendExpiringTime-off]'>>

@pytest.fixture
def global_policy(topology_st, request):
"""Sets the required global
password policy attributes under
cn=config entry
"""

attrs = {'passwordExp': '',
'passwordMaxAge': '',
'passwordWarning': '',
CONFIG_ATTR: ''}
try:
log.info('Get the default values')
entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
'(objectClass=*)', attrs.keys())

for key in attrs.keys():
attrs[key] = entry.getValue(key)

log.info('Set the new values')
topology_st.standalone.modify_s(DN_CONFIG, [
(ldap.MOD_REPLACE, 'passwordExp', 'on'),
(ldap.MOD_REPLACE, 'passwordMaxAge', '172800'),
(ldap.MOD_REPLACE, 'passwordWarning', '86400'),
(ldap.MOD_REPLACE, CONFIG_ATTR, 'on')])

except ldap.LDAPError as ex:
log.error("Failed to set global password policy, error:{:s}" \
.format(ex.message['desc']))
> raise ex
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_warning_test.py:52: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default values
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set global password policy, error:Can't contact LDAP server
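The global_policy fixture above formats errors with ex.message['desc']. That works on this Python 2.7 run because python-ldap raises its exceptions with a single dict argument, but .message is gone in Python 3. A small helper of this kind (hypothetical, assuming python-ldap's convention of carrying the info dict in args[0]) would keep the same log line while staying version-agnostic:

def ldap_error_desc(exc):
    """Return the 'desc' field of a python-ldap error, falling back to str()."""
    if exc.args and isinstance(exc.args[0], dict):
        return exc.args[0].get('desc', str(exc))
    return str(exc)

# except ldap.LDAPError as ex:
#     log.error("Failed to set global password policy, error:{:s}".format(
#         ldap_error_desc(ex)))
#     raise ex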
Error suites/password/pwdPolicy_warning_test.py::test_password_warning[passwordWarning-3600]::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67d5f10>
request = <SubRequest 'global_policy' for <Function 'test_password_warning[passwordWarning-3600]'>>

@pytest.fixture
def global_policy(topology_st, request):
"""Sets the required global
password policy attributes under
cn=config entry
"""

attrs = {'passwordExp': '',
'passwordMaxAge': '',
'passwordWarning': '',
CONFIG_ATTR: ''}
try:
log.info('Get the default values')
entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
'(objectClass=*)', attrs.keys())

for key in attrs.keys():
attrs[key] = entry.getValue(key)

log.info('Set the new values')
topology_st.standalone.modify_s(DN_CONFIG, [
(ldap.MOD_REPLACE, 'passwordExp', 'on'),
(ldap.MOD_REPLACE, 'passwordMaxAge', '172800'),
(ldap.MOD_REPLACE, 'passwordWarning', '86400'),
(ldap.MOD_REPLACE, CONFIG_ATTR, 'on')])

except ldap.LDAPError as ex:
log.error("Failed to set global password policy, error:{:s}" \
.format(ex.message['desc']))
> raise ex
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_warning_test.py:52: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default values
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set global password policy, error:Can't contact LDAP server
Error suites/password/pwdPolicy_warning_test.py::test_with_different_password_states::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67d5f10>
request = <SubRequest 'global_policy' for <Function 'test_with_different_password_states'>>

@pytest.fixture
def global_policy(topology_st, request):
"""Sets the required global
password policy attributes under
cn=config entry
"""

attrs = {'passwordExp': '',
'passwordMaxAge': '',
'passwordWarning': '',
CONFIG_ATTR: ''}
try:
log.info('Get the default values')
entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
'(objectClass=*)', attrs.keys())

for key in attrs.keys():
attrs[key] = entry.getValue(key)

log.info('Set the new values')
topology_st.standalone.modify_s(DN_CONFIG, [
(ldap.MOD_REPLACE, 'passwordExp', 'on'),
(ldap.MOD_REPLACE, 'passwordMaxAge', '172800'),
(ldap.MOD_REPLACE, 'passwordWarning', '86400'),
(ldap.MOD_REPLACE, CONFIG_ATTR, 'on')])

except ldap.LDAPError as ex:
log.error("Failed to set global password policy, error:{:s}" \
.format(ex.message['desc']))
> raise ex
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_warning_test.py:52: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default values
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set global password policy, error:Can't contact LDAP server
Error suites/password/pwdPolicy_warning_test.py::test_default_behavior::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67d5f10>
request = <SubRequest 'global_policy_default' for <Function 'test_default_behavior'>>

@pytest.fixture
def global_policy_default(topology_st, request):
"""Sets the required global password policy
attributes for testing the default behavior
of password expiry warning time
"""

attrs = {'passwordExp': '',
'passwordMaxAge': '',
'passwordWarning': '',
CONFIG_ATTR: ''}
try:
log.info('Get the default values')
entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
'(objectClass=*)', attrs.keys())
for key in attrs.keys():
attrs[key] = entry.getValue(key)

log.info('Set the new values')
topology_st.standalone.modify_s(DN_CONFIG, [
(ldap.MOD_REPLACE, 'passwordExp', 'on'),
(ldap.MOD_REPLACE, 'passwordMaxAge', '86400'),
(ldap.MOD_REPLACE, 'passwordWarning', '86400'),
(ldap.MOD_REPLACE, CONFIG_ATTR, 'off')])
except ldap.LDAPError as ex:
log.error("Failed to set global password policy, error:{:s}" \
.format(ex.message['desc']))
> raise ex
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_warning_test.py:99: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default values
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set global password policy, error:Can't contact LDAP server
Error suites/password/pwdPolicy_warning_test.py::test_with_local_policy::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x67d5f10>
request = <SubRequest 'global_policy' for <Function 'test_with_local_policy'>>

@pytest.fixture
def global_policy(topology_st, request):
"""Sets the required global
password policy attributes under
cn=config entry
"""

attrs = {'passwordExp': '',
'passwordMaxAge': '',
'passwordWarning': '',
CONFIG_ATTR: ''}
try:
log.info('Get the default values')
entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
'(objectClass=*)', attrs.keys())

for key in attrs.keys():
attrs[key] = entry.getValue(key)

log.info('Set the new values')
topology_st.standalone.modify_s(DN_CONFIG, [
(ldap.MOD_REPLACE, 'passwordExp', 'on'),
(ldap.MOD_REPLACE, 'passwordMaxAge', '172800'),
(ldap.MOD_REPLACE, 'passwordWarning', '86400'),
(ldap.MOD_REPLACE, CONFIG_ATTR, 'on')])

except ldap.LDAPError as ex:
log.error("Failed to set global password policy, error:{:s}" \
.format(ex.message['desc']))
> raise ex
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/password/pwdPolicy_warning_test.py:52: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default values
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set global password policy, error:Can't contact LDAP server
Error suites/plugins/accpol_test.py::test_actNinact_local::setup 4.96
topology_st = <lib389.topologies.TopologyMain object at 0x683cc50>

@pytest.fixture(scope="module")
def accpolicy_local(topology_st):
"""Configure local account policy plugin for ou=people subtree and restart the server"""

log.info('Enabling account policy plugin and restarting the server')
try:
topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
topology_st.standalone.modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
except ldap.LDAPError as e:
log.error('Failed to modify account policy plugin attrs')
raise

log.info('Adding Local account policy plugin configuration entries')
try:
topology_st.standalone.add_s(Entry((LOCAL_CONFIG, {
'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
'accountInactivityLimit': '15'})))
topology_st.standalone.add_s(Entry((TEMPLT_COS, {
'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'cosTemplate'],
'acctPolicySubentry': LOCAL_CONFIG})))
topology_st.standalone.add_s(Entry((DEFN_COS, {
'objectclass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
'cosTemplateDn': TEMPLT_COS,
'cosAttribute': 'acctPolicySubentry default operational-default'})))
except ldap.LDAPError as e:
log.error('Failed to add entry ({}, {}, {}):'.format(LOCAL_CONFIG, TEMPLT_COS, DEFN_COS))
raise
> topology_st.standalone.restart(timeout=10)

suites/plugins/accpol_test.py:55:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1258: in restart
self.start(timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1141: in start
"dirsrv@%s" % self.serverid])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

popenargs = (['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1'],)
kwargs = {}, retcode = 1
cmd = ['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']

def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.

The arguments are the same as for the Popen constructor. Example:

check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
> raise CalledProcessError(retcode, cmd)
E CalledProcessError: Command '['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']' returned non-zero exit status 1

/usr/lib64/python2.7/subprocess.py:542: CalledProcessError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389.utils:Enabling account policy plugin and restarting the server
INFO:lib389.utils:Adding Local account policy plugin configuration entries
Job for dirsrv@standalone_1.service failed because a fatal signal was delivered to the control process.
See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
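The restart failure above only carries the exit status of systemctl; the systemd hint ("systemctl status ..." and "journalctl -xe") is where the actual reason lives. A best-effort diagnostic helper of this kind (a sketch; dump_unit_status is hypothetical, not a lib389 API) could be called when restart raises CalledProcessError so the unit status and recent journal lines land in the test log:

import subprocess

def dump_unit_status(serverid):
    """Log systemd status and recent journal entries for a dirsrv instance."""
    unit = 'dirsrv@{}.service'.format(serverid)
    for cmd in (['/usr/bin/systemctl', 'status', '--no-pager', unit],
                ['/usr/bin/journalctl', '--no-pager', '-n', '50', '-u', unit]):
        try:
            log.error(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
        except subprocess.CalledProcessError as e:
            # systemctl status exits non-zero for a failed unit but still
            # prints the useful output, which check_output keeps in e.output.
            log.error(e.output)

# e.g. around the restart in the accpolicy_local fixture:
# try:
#     topology_st.standalone.restart(timeout=10)
# except subprocess.CalledProcessError:
#     dump_unit_status(topology_st.standalone.serverid)
#     raise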
Error suites/plugins/accpol_test.py::test_noinact_local::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x683cc50>

@pytest.fixture(scope="module")
def accpolicy_local(topology_st):
"""Configure local account policy plugin for ou=people subtree and restart the server"""

log.info('Enabling account policy plugin and restarting the server')
try:
topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
topology_st.standalone.modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
except ldap.LDAPError as e:
log.error('Failed to modify account policy plugin attrs')
raise

log.info('Adding Local account policy plugin configuration entries')
try:
topology_st.standalone.add_s(Entry((LOCAL_CONFIG, {
'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
'accountInactivityLimit': '15'})))
topology_st.standalone.add_s(Entry((TEMPLT_COS, {
'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'cosTemplate'],
'acctPolicySubentry': LOCAL_CONFIG})))
topology_st.standalone.add_s(Entry((DEFN_COS, {
'objectclass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
'cosTemplateDn': TEMPLT_COS,
'cosAttribute': 'acctPolicySubentry default operational-default'})))
except ldap.LDAPError as e:
log.error('Failed to add entry ({}, {}, {}):'.format(LOCAL_CONFIG, TEMPLT_COS, DEFN_COS))
raise
> topology_st.standalone.restart(timeout=10)

suites/plugins/accpol_test.py:55:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1258: in restart
self.start(timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1141: in start
"dirsrv@%s" % self.serverid])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

popenargs = (['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1'],)
kwargs = {}, retcode = 1
cmd = ['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']

def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.

The arguments are the same as for the Popen constructor. Example:

check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
> raise CalledProcessError(retcode, cmd)
E CalledProcessError: Command '['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']' returned non-zero exit status 1

/usr/lib64/python2.7/subprocess.py:542: CalledProcessError
Error suites/plugins/accpol_test.py::test_inact_local::setup 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x683cc50>

@pytest.fixture(scope="module")
def accpolicy_local(topology_st):
"""Configure local account policy plugin for ou=people subtree and restart the server"""

log.info('Enabling account policy plugin and restarting the server')
try:
topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
topology_st.standalone.modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', CONFIG_DN)])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
except ldap.LDAPError as e:
log.error('Failed to modify account policy plugin attrs')
raise

log.info('Adding Local account policy plugin configuration entries')
try:
topology_st.standalone.add_s(Entry((LOCAL_CONFIG, {
'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'accountpolicy'],
'accountInactivityLimit': '15'})))
topology_st.standalone.add_s(Entry((TEMPLT_COS, {
'objectclass': ['top', 'ldapsubentry', 'extensibleObject', 'cosTemplate'],
'acctPolicySubentry': LOCAL_CONFIG})))
topology_st.standalone.add_s(Entry((DEFN_COS, {
'objectclass': ['top', 'ldapsubentry', 'cosSuperDefinition', 'cosPointerDefinition'],
'cosTemplateDn': TEMPLT_COS,
'cosAttribute': 'acctPolicySubentry default operational-default'})))
except ldap.LDAPError as e:
log.error('Failed to add entry ({}, {}, {}):'.format(LOCAL_CONFIG, TEMPLT_COS, DEFN_COS))
raise
> topology_st.standalone.restart(timeout=10)

suites/plugins/accpol_test.py:55:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1258: in restart
self.start(timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1141: in start
"dirsrv@%s" % self.serverid])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

popenargs = (['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1'],)
kwargs = {}, retcode = 1
cmd = ['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']

def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.

The arguments are the same as for the Popen constructor. Example:

check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
> raise CalledProcessError(retcode, cmd)
E CalledProcessError: Command '['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']' returned non-zero exit status 1

/usr/lib64/python2.7/subprocess.py:542: CalledProcessError
Error suites/replication/acceptance_test.py::test_add_entry::setup 11.48
request = <SubRequest 'topo' for <Function 'test_add_entry'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6839cd0>
func = <built-in method result4 of LDAP object at 0x2912198>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}170K8+xFPt2VTMn8wEvxzRzxsnhymHiaIs467djUYi63jVNEOtV3qudkyFLGgCl3qY1pGZT443oXaFK1I4uUIDM0b4RpD6yQ
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}yLnW0BQ4SerFj6jSoFohDVT37/k6rh2t7Mf4pJ8kfRQPZu3RoBsKnmCmeMK42wItfIvdOH32SnhOiBkLFkhIBd9Z27oPOVgL
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}KiDNuqlIhxYiKko5dP/8knpaBYAWagMU/H+FUF09x6Jx4k62K6JrBqxZQHD4u/HangWHTULVlxWJVyRmKMnS+lCYP1B0EQhl
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}A8dYBEJt3yo8V0WMyzTDqt2qphcvNC0Lu6zhx9j9JVN1TYZS11ngWcQqjkd0RXAFea67htzrLxSij4pjOcGBwiFV1G9HJwCe
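The SERVER_DOWN above means one of the freshly created masters stopped answering between enableReplication() and the replica search that agreement.create() performs. A short liveness poll before any agreements are created would turn that into an explicit, early failure instead of an exception deep inside python-ldap. The sketch below is only an illustration: wait_for_ldap() is a hypothetical helper, not part of lib389, and the host/port values are assumed to be the HOST_MASTER_*/PORT_MASTER_* constants used by the fixture.

# Minimal liveness poll: bind anonymously until the listener answers or the
# timeout expires. wait_for_ldap() is a hypothetical helper, not lib389 API.
import time
import ldap

def wait_for_ldap(host, port, timeout=30):
    uri = "ldap://%s:%s" % (host, port)
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            conn = ldap.initialize(uri)
            conn.simple_bind_s()   # anonymous bind is enough to prove the listener is up
            conn.unbind_s()
            return True
        except ldap.SERVER_DOWN:
            time.sleep(1)
    return False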
Error suites/replication/acceptance_test.py::test_modify_entry::setup 0.00
(Setup error identical to test_add_entry above: the module-scoped topology_m4 fixture failed once and pytest reports the cached failure here as well. Final exception: SERVER_DOWN: {'desc': "Can't contact LDAP server"} at /usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106.)
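The remaining acceptance_test entries in this block all share the traceback shown under test_add_entry because topology_m4 is a module-scoped fixture: it runs once per module, and when that single run raises, pytest replays the cached error as the setup failure of every test that requests it. A tiny self-contained illustration of that behaviour (hypothetical code, not taken from the suite):

import pytest

@pytest.fixture(scope="module")
def broken_topology():
    # fails once; pytest caches the exception for the whole module
    raise RuntimeError("simulated SERVER_DOWN during fixture setup")

def test_add(broken_topology):
    pass

def test_modify(broken_topology):
    pass

# Running this file yields two setup errors with the identical RuntimeError trace.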
Error suites/replication/acceptance_test.py::test_delete_entry::setup 0.00
(Setup error identical to test_add_entry above; same cached topology_m4 fixture failure: SERVER_DOWN: {'desc': "Can't contact LDAP server"}.)
Error suites/replication/acceptance_test.py::test_modrdn_entry[0]::setup 0.00
(Setup error identical to test_add_entry above; same cached topology_m4 fixture failure: SERVER_DOWN: {'desc': "Can't contact LDAP server"}.)
Error suites/replication/acceptance_test.py::test_modrdn_entry[1]::setup 0.00
(Setup error identical to test_add_entry above; same cached topology_m4 fixture failure: SERVER_DOWN: {'desc': "Can't contact LDAP server"}.)
Error suites/replication/cleanallruv_test.py::test_cleanallruv_init::setup 11.45
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}X+KTP0D/5dLKm2J6knfurwiUZcEFjnIsAwvGFPDnapGwaVq3SZB17mIwCG0XaUmkoCKTrust8UOWfAxBQpISNVQEPd+x9MaK
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}Xm8JP+Z+5wrtobLfAmjG/tqPifrH4qyQYQnpcilNi9rboc9Jr/zrbR76rYYrkCUGiwSF08JpXeNscMnH3WbIUXfLQqv3WNfA
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}uYzWv4c9Ki5m1socyJ1isb12wt5xmNf0E/q+Jt0RcaiYL9QhiGXjMlldp+1by+HpgAGAmBGGOY8aYX3rMujEmBMYl1xd+/AN
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}YEtEg0IKJUtHL6dMgYX6I0EpsWthhhZwg6xZD+JdgVn5tz8CIQq31z3LggwNQAg+a2xgEZoJ29U3QnqmGRGfHCzlx4eeMc+3
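For reference, the call that keeps failing here is replica.list(), which searches for nsDS5Replica entries under the mapping tree before an agreement is created. The sketch below reproduces that check in isolation; it assumes the usual 389-ds mapping-tree DN layout and an already-open python-ldap connection, and check_replica() is a hypothetical helper rather than lib389 API.

import ldap

def check_replica(conn, suffix="dc=example,dc=com"):
    # assumed standard 389-ds layout: cn="<suffix>",cn=mapping tree,cn=config
    base = 'cn="%s",cn=mapping tree,cn=config' % suffix
    try:
        entries = conn.search_s(base, ldap.SCOPE_SUBTREE, "(objectclass=nsDS5Replica)")
    except ldap.SERVER_DOWN:
        return None   # the instance died, exactly as in the tracebacks above
    return [dn for dn, attrs in entries]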
Error suites/replication/cleanallruv_test.py::test_cleanallruv_clean::setup 0.00
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
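Note on this group of setup errors: they all fail the same way. By the time topologies.py reaches master2.agreement.create(), the connection to master 2 no longer answers, so the replica lookup inside agreement.create() surfaces ldap.SERVER_DOWN. A minimal reachability probe along these lines would make that root cause explicit before any agreement work starts. This is a sketch only, using plain python-ldap; the URI below is a placeholder, the suite's real values come from its HOST_MASTER_2 / PORT_MASTER_2 constants.

import ldap

def ldap_reachable(uri, timeout=5):
    """Return True if an anonymous bind to uri succeeds, False if the listener is unreachable."""
    conn = ldap.initialize(uri)
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
    try:
        conn.simple_bind_s()   # anonymous bind is enough to prove the server answers
        conn.unbind_s()
        return True
    except ldap.SERVER_DOWN:
        return False

# Placeholder URI, not a value taken from this report.
if not ldap_reachable('ldap://localhost:39002'):
    print('master 2 is not reachable; agreement creation would raise SERVER_DOWN')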
Error suites/replication/cleanallruv_test.py::test_cleanallruv_clean_restart::setup 0.00
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/cleanallruv_test.py::test_cleanallruv_clean_force::setup 0.00
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
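Aside on the fixture shape seen in every entry here: topology_m4 registers its teardown through request.addfinalizer(fin). The same structure can be expressed as a yield fixture, which keeps setup and teardown in one visible flow. The sketch below uses a stand-in object, not the suite's real DirSrv handling.

import pytest

class FakeInstance(object):
    """Stand-in for a DirSrv instance, only to illustrate the fixture shape."""
    def delete(self):
        pass

@pytest.fixture(scope="module")
def topology_sketch():
    instances = [FakeInstance(), FakeInstance()]   # setup: create the instances
    yield instances                                # tests run while the fixture is suspended here
    for inst in instances:                         # teardown: what fin() does today
        inst.delete()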
Error suites/replication/cleanallruv_test.py::test_cleanallruv_abort::setup 0.00
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/cleanallruv_test.py::test_cleanallruv_abort_restart::setup 0.00
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/cleanallruv_test.py::test_cleanallruv_abort_certify::setup 0.00
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/cleanallruv_test.py::test_cleanallruv_stress_clean::setup 0.00
request = <SubRequest 'topology_m4' for <Function 'test_cleanallruv_init'>>

@pytest.fixture(scope="module")
def topology_m4(request):
"""Create Replication Deployment with four masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

# Creating master 4...
if DEBUGGING:
master4 = DirSrv(verbose=True)
else:
master4 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_4
args_instance[SER_PORT] = PORT_MASTER_4
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master4.allocate(args_master)
instance_master4 = master4.exists()
if instance_master4:
master4.delete()
master4.create()
master4.open()
master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_4)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
master4.stop()
else:
master1.delete()
master2.delete()
master3.delete()
master4.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 1 to master 4
properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
port=master4.port, properties=properties)
if not m1_m4_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m4_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:823:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6c2cf90>
func = <built-in method result4 of LDAP object at 0x2942d78>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
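The _ldap_call() frame at the bottom of each of these tracebacks is python-ldap's low-level dispatcher, and the repeated inner() frames are lib389's wrapper around it, so any outage during the result fetch is reported as SERVER_DOWN. If a caller wanted to absorb a brief outage instead of failing the fixture, a small retry helper would be enough. A sketch under that assumption (the suite itself does not retry, and the commented usage line refers to the objects from the fixture above):

import time
import ldap

def retry_on_server_down(fn, attempts=3, delay=2.0):
    """Call fn(); on ldap.SERVER_DOWN wait `delay` seconds and retry, up to `attempts` tries."""
    for attempt in range(1, attempts + 1):
        try:
            return fn()
        except ldap.SERVER_DOWN:
            if attempt == attempts:
                raise
            time.sleep(delay)

# Hypothetical usage: retry the replica lookup that fails in the traceback above.
# entries = retry_on_server_down(lambda: master2.replica.list(SUFFIX))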
Error suites/replication/single_master_test.py::test_mail_attr_repl::setup 26.83
request = <SubRequest 'topo_r' for <Function 'test_mail_attr_repl'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
('Update failed: status', "-1 - LDAP error: Can't contact LDAP server")
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}hRP1uxY6bZwNghyGEm5xA3os72vYGBm1rCo4vtqZi/v2CbteK1w+ovggLLw4TeYI6gz+DhLH5pYdFU902yxlYawU6tYyxxlk
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}ezXhegtIeh6KyywoThTKSRl9wmDSqbGyeSFplY6K51laIbb3/6ulREpi6KuxPe/k8zjUExlzYIxKuLvodd4v6XSxJblSxCOx
INFO:lib389:Starting total init cn=meTo_localhost:39201,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config
CRITICAL:lib389:testReplication() failed to modify (dc=example,dc=com), error ({'desc': "Can't contact LDAP server"})
CRITICAL:lib389.topologies:Replication is not working.
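The CRITICAL line above shows where this entry diverges from the earlier ones: setup got as far as testReplication(), which tried to modify dc=example,dc=com and hit "Can't contact LDAP server" before any comparison could run. Conceptually that kind of check writes a marker change on one instance and polls the other until it replicates. The sketch below is a simplified stand-in using raw python-ldap, not lib389's actual implementation; URIs and credentials are placeholders, and the plain str attribute values match the report's Python 2.7 environment.

import time
import ldap

def replication_smoke_test(supplier_uri, consumer_uri, bind_dn, bind_pw,
                           suffix='dc=example,dc=com', timeout=30):
    """Write a marker to 'description' on the supplier's suffix entry and poll the consumer for it."""
    marker = 'repl-check-%d' % int(time.time())

    sup = ldap.initialize(supplier_uri)
    sup.simple_bind_s(bind_dn, bind_pw)
    sup.modify_s(suffix, [(ldap.MOD_REPLACE, 'description', [marker])])  # throwaway attribute change
    sup.unbind_s()

    con = ldap.initialize(consumer_uri)
    con.simple_bind_s(bind_dn, bind_pw)
    try:
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = con.search_s(suffix, ldap.SCOPE_BASE, '(objectClass=*)', ['description'])
            if marker in result[0][1].get('description', []):
                return True   # the change arrived on the consumer
            time.sleep(2)
        return False          # replication did not converge within the timeout
    finally:
        con.unbind_s()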
Error suites/replication/wait_for_async_feature_test.py::test_not_int_value::setup 5.75
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}NTV2hXbeX9aCpg0pthxPjk/pdYsvKWCuggCEJBiW6om7NaNiiJfgFcPoKTo4G15ps/p0f7vXh+OrJ/bgfdsk4EsIyPqiea8p
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}OEa4HcGYAYo6xNJ+40JIT+OnP/O9Gjn7diYHgSS+EfrAfQK7qhH8f/EG3O2sKaw8f3QcvBefzibvJTdqeJbzrM9BFZShHTEL
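Every setup error recorded above is the same root failure: while topology_m2 is building the agreement from master 2 back to master 1, the LDAP connection to the instance drops and python-ldap raises SERVER_DOWN. The sketch below is illustrative only and is not part of the test suite; it assumes nothing beyond the python-ldap calls already visible in these tracebacks, and the URI, timeout, and interval values are hypothetical. It shows one way a reachability probe could be run before agreement creation is attempted.

import time

import ldap


def wait_for_ldap(uri, timeout=30.0, interval=1.0):
    """Return True once an anonymous bind to uri succeeds, False on timeout.

    Hedged sketch: uri, timeout, and interval are placeholder values, not
    anything taken from the suite's configuration.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        conn = ldap.initialize(uri)
        try:
            conn.simple_bind_s()   # anonymous bind is enough to prove the listener is up
            conn.unbind_s()
            return True
        except ldap.SERVER_DOWN:
            time.sleep(interval)   # instance not accepting connections yet; retry
    return False

# Hypothetical usage before the m2 -> m1 agreement is created:
# if not wait_for_ldap('ldap://localhost:38902'):
#     raise RuntimeError('master2 is unreachable; agreement creation would fail')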
Error suites/replication/wait_for_async_feature_test.py::test_multi_value::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_value_check[waitfor_async_attr0]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_value_check[waitfor_async_attr1]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_value_check[waitfor_async_attr2]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_value_check[waitfor_async_attr3]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_behavior_with_value[waitfor_async_attr0]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_behavior_with_value[waitfor_async_attr1]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_behavior_with_value[waitfor_async_attr2]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error suites/replication/wait_for_async_feature_test.py::test_behavior_with_value[waitfor_async_attr3]::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_not_int_value'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x724afd0>
func = <built-in method result4 of LDAP object at 0x31ad7d8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47462_test.py::test_ticket47462::setup 5.72
request = <SubRequest 'topology_m2' for <Function 'test_ticket47462'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7b7b190>
func = <built-in method result4 of LDAP object at 0x31d2ad0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}el5rM64bofiNhUATc9u6xvg1QJpzQ7arRdFyV7SNI4u3PinPEf2LEX+SNdfA/RBJvxfF+Fcuyq0Fh9r7nBAWDl/szGiGarqN
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}pDMOKIzbHbZxLNrC+Z8iMiDoab7QslvduHoyZgs1A9WFySkOgBR7WmM6DRBlaA+qxd7X2lb6wu7IJoz5QbgxzSV9+WRne/OV
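Note: the SERVER_DOWN traceback above is raised while agreement.create() lists the replica entries over the instance's own connection, i.e. the instance stopped answering before the agreements could be wired up. A minimal, hypothetical reachability probe using python-ldap is sketched below (the helper name and the 5-second timeout are assumptions, not part of the captured fixture); it could be run against HOST_MASTER_2/PORT_MASTER_2 before agreement creation.

import ldap

def ldap_is_reachable(host, port):
    # Anonymous root-DSE search; raises ldap.SERVER_DOWN when nothing answers on host:port.
    conn = ldap.initialize('ldap://{}:{}'.format(host, port))
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
    try:
        conn.search_s('', ldap.SCOPE_BASE, '(objectClass=*)', ['namingContexts'])
        return True
    except ldap.SERVER_DOWN:
        return False

# Illustrative use only: probe the second master before creating agreements.
# assert ldap_is_reachable(HOST_MASTER_2, PORT_MASTER_2)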
Error tickets/ticket47490_test.py::test_ticket47490_init::setup 26.65
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
('Update failed: status', "-1 - LDAP error: Can't contact LDAP server")
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}hgTAVy9BXWq2jBD9QtBIIbSB+u/DBd8RTpcDXfWJvPtIhr/dKiJguo4OojdhkP7n6kuXUTc4jPCCspuZ7j29HmcCI7FV3NUW
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}tReh9AxEs5a3WsGKjtO2a48OlBn1kAzCe2bb8nmc/P0c4w/vJeRXEoPzU7vciXHD1LszCSy177WVhiMpLBgtoRuDJN6WW8FF
INFO:lib389:Starting total init cn=meTo_localhost:39201,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config
CRITICAL:lib389:testReplication() failed to modify (dc=example,dc=com), error ({'desc': "Can't contact LDAP server"})
CRITICAL:lib389.topologies:Replication is not working.
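Note: this fixture fails on its single testReplication() check and then hits the hard assert False at topologies.py:270. As an illustrative sketch only (the retry count and delay are assumptions; master1, consumer1, DEFAULT_SUFFIX and log are the names already used in the fixture above), the check could be retried before asserting:

import time

replicated = False
for attempt in range(5):
    if master1.testReplication(DEFAULT_SUFFIX, consumer1):
        replicated = True
        break
    # Give the consumer a little more time after the total init and try again.
    log.info('Replication not ready yet (attempt %d), retrying', attempt + 1)
    time.sleep(5)
assert replicated, 'Replication is not working after retries'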
Error tickets/ticket47490_test.py::test_ticket47490_one::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_two::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_three::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_four::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_five::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_six::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_seven::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_eight::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47490_test.py::test_ticket47490_nine::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47490_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47536_test.py::test_ticket47536::setup 5.75
request = <SubRequest 'topology_m2' for <Function 'test_ticket47536'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x69a1a90>
func = <built-in method result4 of LDAP object at 0x2b43350>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}jqvTQXBF0PMMmkfbukUVz+5Y9rW/kB1rvXWUVHSuePYf03Rp5cctHmqlt5CdiDwvUkaWkxkPmrUU6QeBIRJ4nPb3NF2wNl4L
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}AJK0jQry4Vj+/gdWWrx5k/F8ItWHZ7S+Ubxi1OehsyrqDN16ag1to+mnXuInZNy/gfNJsKPPa8nhi7dnrZKOXcLS8rtFP9aD
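Note: the same SERVER_DOWN pattern appears here while agreement.create() calls replica.list() on the instance's connection. A hedged guard is sketched below, built only from calls visible in the traceback above (replica.list(), host/port attributes, log.fatal) plus python-ldap's SERVER_DOWN exception; the loop and the error messages are illustrative, not lib389 code.

import ldap

for master in (master1, master2):
    try:
        replica_entries = master.replica.list(SUFFIX)
    except ldap.SERVER_DOWN:
        # Fail fast with a clear message instead of dying inside agreement.create().
        log.fatal('{}:{} is not reachable, cannot create agreements'.format(master.host, master.port))
        raise
    if not replica_entries:
        log.fatal('No replica entry found for suffix {} on {}:{}'.format(SUFFIX, master.host, master.port))
        raise AssertionError('replica not configured')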
Error tickets/ticket47573_test.py::test_ticket47573_init::setup 25.85
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47573_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
('Update failed: status', "-1 - LDAP error: Can't contact LDAP server")
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}jzZtGgbeQn7Fsku7+f/eUn20B/PDNJiutqosdr1ccebd5Y7n4VpE0f0FylC7HIJlcg4FUygUWZNLgXGwqcXeoOefFk8NdEjV
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}WaPDBnHdbXHWLyWHebAQ50yUgDI59oY+7KzYqPUbfOb4sWrpKKCJ8SGSMZucQwOtdtrDvtBEmKYvt/zIkH/S9dmoy+g4qlzE
INFO:lib389:Starting total init cn=meTo_localhost:39201,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config
CRITICAL:lib389:testReplication() failed to modify (dc=example,dc=com), error ({'desc': "Can't contact LDAP server"})
CRITICAL:lib389.topologies:Replication is not working.
Error tickets/ticket47573_test.py::test_ticket47573_one::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47573_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47573_test.py::test_ticket47573_two::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47573_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47573_test.py::test_ticket47573_three::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47573_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47619_test.py::test_ticket47619_init::setup 26.82
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47619_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
('Update failed: status', "-1 - LDAP error: Can't contact LDAP server")
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}pptgOWS65PjgeRQe8+FPvgITBy/Sk25JDFuATCrICmF6m/8plapYhuyG0tsEgRPwMQgcc1hMfXwRUWOum4oAJSfgF1jsxR42
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}IWOvV1sVaxgMfshD2XpiMdAVV5cKMVl3Z2In5k/iCeqPuhgJvgjVQlFBXejMddJdCRF2Y91fEjty9F8ui6ldSA/1AmHPI2Lt
INFO:lib389:Starting total init cn=meTo_localhost:39201,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config
CRITICAL:lib389:testReplication() failed to modify (dc=example,dc=com), error ({'desc': "Can't contact LDAP server"})
CRITICAL:lib389.topologies:Replication is not working.
Error tickets/ticket47619_test.py::test_ticket47619_create_index::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47619_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47619_test.py::test_ticket47619_reindex::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47619_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
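Note the 0.00s duration and the SubRequest still naming test_ticket47619_init: topology_m1c1 (like topology_m2 further down) is a module-scoped fixture, so pytest caches its one setup failure and re-reports the identical traceback for every remaining test in the module. A toy, self-contained illustration of that caching behaviour; nothing in it comes from this report:

import pytest

@pytest.fixture(scope="module")
def broken_topology():
    # Fails once; pytest caches the exception for the rest of the module.
    raise RuntimeError("setup failed")

def test_first(broken_topology):
    pass

def test_second(broken_topology):
    # Reported as a setup error too, with the same cached traceback and ~0.00s duration.
    pass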
Error tickets/ticket47619_test.py::test_ticket47619_check_indexed_search::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47619_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47653MMR_test.py::test_ticket47653_init::setup 5.72
request = <SubRequest 'topology_m2' for <Function 'test_ticket47653_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7806850>
func = <built-in method result4 of LDAP object at 0x2f0b3f0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}h4IIBaxv+mB17mfsFmwiopiFSwaKeTs6nPZqjNYohm5d0X+S7L1fKDrUZng/r7+RkyvZPoBL4GXy0MOYE2CwnHTr2OdeFmy5
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}hoxK07NOAu5eMKiLthieq6M/cF9jDWd7QTPxN4jAheYjFgUvKUTEucjV/wXiV68PbW6wNw7pjuZ4/cdSHaVlvf8ddlUJiSB9
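In the ticket47653MMR, ticket47676, and ticket47721 modules the fixture gets further: both masters are created and the m1 -> m2 agreement succeeds, but the m2 -> m1 agreement fails because master2's connection has dropped (SERVER_DOWN) by the time agreement.create() searches for the replica entry. A minimal pre-flight sketch that re-opens the connection the same way the fixture originally did; the helper name and the root-DSE probe are assumptions, and only DirSrv.open() plus the python-ldap search_s()/SERVER_DOWN seen in the traceback come from the report:

import ldap

def ensure_connection(instance):
    # Cheap liveness probe: a base search on the root DSE raises SERVER_DOWN
    # if the underlying LDAP connection is gone; in that case re-bind.
    try:
        instance.search_s('', ldap.SCOPE_BASE, '(objectClass=*)', ['supportedLDAPVersion'])
    except ldap.SERVER_DOWN:
        instance.open()

# Hypothetical use just before the failing call in the fixture:
# ensure_connection(master2)
# m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
#                                       port=master1.port, properties=properties)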
Error tickets/ticket47653MMR_test.py::test_ticket47653_add::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47653_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7806850>
func = <built-in method result4 of LDAP object at 0x2f0b3f0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47653MMR_test.py::test_ticket47653_modify::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47653_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7806850>
func = <built-in method result4 of LDAP object at 0x2f0b3f0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47676_test.py::test_ticket47676_init::setup 5.73
request = <SubRequest 'topology_m2' for <Function 'test_ticket47676_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f64950>
func = <built-in method result4 of LDAP object at 0x34a69b8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}R/gOrCMNwwpoU6NHHtDTGqtdM6Of7jboR+6Ql8ImAq86eN65PoZFkjCNEgEaKw3uMeQxHgxAFfTvZ3KhIkTrD0nW4kF5UNtF
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}oIkanFqjRCkOBXH66DtNa7GfJrTujLV1Wj648CuicpywA5LaB5yflO/KLpMvOXxkESW8TnyvdXNs3KCvF43MKOKVcq0OFML4
Error tickets/ticket47676_test.py::test_ticket47676_skip_oc_at::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47676_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f64950>
func = <built-in method result4 of LDAP object at 0x34a69b8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47676_test.py::test_ticket47676_reject_action::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47676_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f64950>
func = <built-in method result4 of LDAP object at 0x34a69b8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47721_test.py::test_ticket47721_init::setup 5.88
request = <SubRequest 'topology_m2' for <Function 'test_ticket47721_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e74750>
func = <built-in method result4 of LDAP object at 0x294a0a8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}u3sEBRYNNNY7ldpsAHd6jE4m9H2vGoAbpua0fPwuYeHRru8F/3ScLVlT3EZ6wBJLHjsZJab2izGmZ8uc0XHBcXTXQlbtkun+
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}KE9FaGQLwIyzKNRKkInFWedp9XRYHgB5kVJ8rMLKT1Bjd0rxJTPHn+TE2jlVkyhUToRjU9Yz8i69LFJolkRfdZ6/Dx79RhFl
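The same SERVER_DOWN signature recurs for every topology_m2 module in this report, always on the master2 side. When triaging, it can help to take lib389 out of the loop and probe the instance with python-ldap directly, the library these tracebacks already pass through; the URI and credentials below are placeholders, not values from this run:

import ldap

def ldap_reachable(uri, bind_dn, password):
    # Return True if a plain bind and a base search on cn=config both succeed.
    conn = ldap.initialize(uri)
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
    try:
        conn.simple_bind_s(bind_dn, password)
        conn.search_s('cn=config', ldap.SCOPE_BASE, '(objectClass=*)', ['cn'])
        conn.unbind_s()
        return True
    except ldap.LDAPError:
        return False

# e.g. ldap_reachable('ldap://<host>:<port>', 'cn=Directory Manager', '<password>')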
Error tickets/ticket47721_test.py::test_ticket47721_0::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47721_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e74750>
func = <built-in method result4 of LDAP object at 0x294a0a8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47721_test.py::test_ticket47721_1::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47721_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e74750>
func = <built-in method result4 of LDAP object at 0x294a0a8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
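Note: every setup error in this group is the same underlying failure. python-ldap raises SERVER_DOWN while lib389's agreement.create() looks up the replica entry (replica.list() -> search_s()), which means the target directory instance is no longer answering on its LDAP port. The sketch below is a hypothetical pre-flight probe, not part of the fixture or of lib389; the helper name ldap_is_reachable and its parameters are invented for illustration. It only uses stock python-ldap calls (initialize, set_option, simple_bind_s) to confirm an instance is reachable before any agreements are attempted, so a dead server fails fast with a clear message instead of a SERVER_DOWN deep in the traceback.

import ldap

def ldap_is_reachable(host, port, timeout=5):
    """Return True if an anonymous bind to ldap://host:port succeeds."""
    uri = 'ldap://{}:{}'.format(host, str(port))
    conn = ldap.initialize(uri)
    # Bound the connect attempt so a dead server fails quickly.
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
    try:
        conn.simple_bind_s()   # anonymous bind is enough to prove liveness
        conn.unbind_s()
        return True
    except ldap.LDAPError:
        return False

# Example (hypothetical) use before building the agreements:
# for host, port in ((HOST_MASTER_1, PORT_MASTER_1), (HOST_MASTER_2, PORT_MASTER_2)):
#     assert ldap_is_reachable(host, port), 'cannot reach {}:{}'.format(host, port)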
Error tickets/ticket47721_test.py::test_ticket47721_2::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47721_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e74750>
func = <built-in method result4 of LDAP object at 0x294a0a8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47721_test.py::test_ticket47721_3::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47721_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e74750>
func = <built-in method result4 of LDAP object at 0x294a0a8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47721_test.py::test_ticket47721_4::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47721_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e74750>
func = <built-in method result4 of LDAP object at 0x294a0a8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47787_test.py::test_ticket47787_init::setup 5.76
request = <SubRequest 'topology_m2' for <Function 'test_ticket47787_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x800b590>
func = <built-in method result4 of LDAP object at 0x2902f30>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}HPyafDvUeMK3fPELhgPcXEBcUBlsRKHTDsM6WUznZt29YC6kqv2cLkQ+SzzSW+6lrgZ1ALWOYcVZhrF11rgr8NVulRHbPyTi
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}Hd54CxZCaD+PHU12wp7gc8Ni3sqjvsh7l3G/Y0xv21hEjCMrlROGJpz0SmRlgCjQcsg+uLaIBDZelMzuUMcJsRnoA71pe0nj
Error tickets/ticket47787_test.py::test_ticket47787_2::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47787_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x800b590>
func = <built-in method result4 of LDAP object at 0x2902f30>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47869MMR_test.py::test_ticket47869_init::setup 5.72
request = <SubRequest 'topology_m2' for <Function 'test_ticket47869_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e80590>
func = <built-in method result4 of LDAP object at 0x2949bc0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}sMQapEN1OD8lUuR0zI2CoXZPnfZENHgvGVg+UG7zevOuDI4Re6aEbWSR5vsR7MEuWrN8XpmhuUyE1KtxeiH7G+ckYA0v90mz
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}FyBL6xHavwJQ+hHEiNVZSeOtcikNJ/3e6nJV1S8iR884ERlW4qVkvOm+duNvUZMF4Ed5uLt1a5zZr6rHVyA6Wzpi8uSj4WyK
Error tickets/ticket47869MMR_test.py::test_ticket47869_check::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47869_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7e80590>
func = <built-in method result4 of LDAP object at 0x2949bc0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47871_test.py::test_ticket47871_init::setup 25.83
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47871_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
('Update failed: status', "-1 - LDAP error: Can't contact LDAP server")
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}qYU7ixfuHwqVUHK0RNW7QzM5XMXv2AM8/VByLzkzZnj8rkMh792wItlz31cekq1vISBawlhNDFCLVaNfuHd7BwXJhXr7Yoq6
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}VDykCyT6nDco77ML9LDINxBs0+f7VupSj44MEiW9EXrzg/R4RmYK630T93pROosg1HB/ppoi6Q43CEEze/cBLUQSsBuP+3QC
INFO:lib389:Starting total init cn=meTo_localhost:39201,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config
CRITICAL:lib389:testReplication() failed to modify (dc=example,dc=com), error ({'desc': "Can't contact LDAP server"})
CRITICAL:lib389.topologies:Replication is not working.
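Note: the ticket47871 setup fails differently from the SERVER_DOWN errors above. The agreements are created and the total init starts, but master1.testReplication(DEFAULT_SUFFIX, consumer1) returns False because the consumer stopped answering ("Update failed ... Can't contact LDAP server"), so the fixture hits assert False. When the cause is timing rather than a dead instance, retrying the same call can help; the sketch below is only an illustration, the helper name wait_for_replication is invented, and it would not have rescued this particular run since the consumer was genuinely unreachable.

import time

def wait_for_replication(master, consumer, suffix, attempts=10, delay=3):
    """Poll master.testReplication() until it succeeds or attempts run out."""
    for _ in range(attempts):
        if master.testReplication(suffix, consumer):
            return True
        time.sleep(delay)
    return False

# Hypothetical use in the fixture, replacing the single probe:
# assert wait_for_replication(master1, consumer1, DEFAULT_SUFFIX)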
Error tickets/ticket47871_test.py::test_ticket47871_1::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47871_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47871_test.py::test_ticket47871_2::setup 0.00
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket47871_init'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
Error tickets/ticket47966_test.py::test_ticket47966::setup 5.71
request = <SubRequest 'topology_m2' for <Function 'test_ticket47966'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x88f45d0>
func = <built-in method result4 of LDAP object at 0x34a54e0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}QsBONSXDrwaB3c4oUcPG0+vFqpU3xN85JWE+wKm1eNxjNGBr9y4gGquWmng3vcIom8ihmQ20BxGJuvcvp5FXNKwyP6PBKCYs
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}m+HFhZvfHzwozisQjhtpNNqgzaZnRWccRjqPqc0kjsPPt0NKAgGTXOSpU/a7XD5eClxdJj3ZEBY1bLy2z2bmdUJsFyM6kc6w
Error tickets/ticket47988_test.py::test_ticket47988_init::setup 5.47
request = <SubRequest 'topology_m2' for <Function 'test_ticket47988_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8aa84d0>
func = <built-in method result4 of LDAP object at 0x295a6c0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}PTOUfBw1IXwueF6uogfPM+Qf950TYDmatdTnH+joFleFQcrbKrkgVsyBV1nn6EyO1hasbQpB16VXm/dWYqJuXFzeNty4d6PZ
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}f92zhGgqFGd0wpH16gNS0zlER3c/Gnhuz3uQpL5DoQ6E7VQc7tOzAsyQhCBYhvGy+4sOwD+ZeFRccX3cQqLbS33uC45HeaRt
Error tickets/ticket47988_test.py::test_ticket47988_1::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47988_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8aa84d0>
func = <built-in method result4 of LDAP object at 0x295a6c0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47988_test.py::test_ticket47988_2::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47988_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8aa84d0>
func = <built-in method result4 of LDAP object at 0x295a6c0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47988_test.py::test_ticket47988_3::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47988_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8aa84d0>
func = <built-in method result4 of LDAP object at 0x295a6c0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47988_test.py::test_ticket47988_4::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47988_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8aa84d0>
func = <built-in method result4 of LDAP object at 0x295a6c0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47988_test.py::test_ticket47988_5::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47988_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8aa84d0>
func = <built-in method result4 of LDAP object at 0x295a6c0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket47988_test.py::test_ticket47988_6::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket47988_init'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8aa84d0>
func = <built-in method result4 of LDAP object at 0x295a6c0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket48226_test.py::test_ticket48226_set_purgedelay::setup 5.76
request = <SubRequest 'topology_m2' for <Function 'test_ticket48226_set_purgedelay'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7d9c550>
func = <built-in method result4 of LDAP object at 0x295ab70>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}b656dIeF80Yes+7vj0LHBNztxrKXWOVZKlInZHXfTEozUont1dEZlskZ6HzIzmaNimv4QZ3jHcTjabRJ7Rcogf/No/sOZr7x
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config
cn: bind dn pseudo user
cn: replrepl
objectClass: top
objectClass: person
sn: bind dn pseudo user
userPassword: {SSHA512}2THYVktqxtjHwxBMC5XxDyDMtLDmJLodvyEahTOm6C7NRuA17dl2TxBjje9KtjHvHDaVnj6QVaEwtXxOUPozRcfcr918MPL5
Error tickets/ticket48226_test.py::test_ticket48226_1::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket48226_set_purgedelay'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7d9c550>
func = <built-in method result4 of LDAP object at 0x295ab70>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket48266_test.py::test_ticket48266_fractional::setup 5.72
request = <SubRequest 'topology_m2' for <Function 'test_ticket48266_fractional'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xea48f90>
func = <built-in method result4 of LDAP object at 0x2949fd0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}mw5qqBzunvTh8izPA38l4NjmBZoKJ/nwGtzFQGDMloNa/xbS+xHe2aWuF5Mqf+BnigiRVHg2mH/t3baqTCVHoeDU0WPEc/iS INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}ZZCZwPwEzKMBbixKYC4faJsMzXs1MUt5dLj0qVEpPUAQzW4qVqvT01NLrTAoq+kpijZPGi6dPPYIwEYce2lGZyYX+oHUIhmF
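With one exception (ticket48799, further below), the setup errors in this section all bottom out in the same ldap.SERVER_DOWN ("Can't contact LDAP server") raised from _ldap_call while agreement.create() searches for the replica entry, which points at the target instance no longer answering on its LDAP port rather than at the test logic itself. A minimal sketch of checking that symptom directly with python-ldap; the URI is a placeholder assumption and should be replaced with the failing instance's real host and port:

import ldap

def ldap_is_reachable(uri="ldap://localhost:39002"):  # placeholder URI, not from the report
    """Return True if an anonymous bind succeeds, False if the server is unreachable."""
    conn = ldap.initialize(uri)
    conn.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)  # fail fast instead of hanging
    try:
        conn.simple_bind_s()   # an anonymous bind is enough to prove the listener is up
        conn.unbind_s()
        return True
    except ldap.SERVER_DOWN:
        return False

Running this against the instance behind the failing connection right after a setup error distinguishes a stopped or crashed instance from a problem on the test side.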
Error tickets/ticket48266_test.py::test_ticket48266_check_repl_desc::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket48266_fractional'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xea48f90>
func = <built-in method result4 of LDAP object at 0x2949fd0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket48266_test.py::test_ticket48266_count_csn_evaluation::setup 0.00
request = <SubRequest 'topology_m2' for <Function 'test_ticket48266_fractional'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xea48f90>
func = <built-in method result4 of LDAP object at 0x2949fd0>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Error tickets/ticket48325_test.py::test_ticket48325::setup 8.53
request = <SubRequest 'topology_m1h1c1' for <Function 'test_ticket48325'>>

@pytest.fixture(scope="module")
def topology_m1h1c1(request):
"""Create Replication Deployment with one master, one consumer and one hub"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating hub 1...
if DEBUGGING:
hub1 = DirSrv(verbose=True)
else:
hub1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_HUB_1
args_instance[SER_PORT] = PORT_HUB_1
args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_hub = args_instance.copy()
hub1.allocate(args_hub)
instance_hub1 = hub1.exists()
if instance_hub1:
hub1.delete()
hub1.create()
hub1.open()
hub1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB,
replicaId=REPLICAID_HUB_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
hub1.stop()
consumer1.stop()
else:
master1.delete()
hub1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to hub 1
properties = {RA_NAME: 'meTo_{}:{}'.format(hub1.host, str(hub1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host,
port=hub1.port, properties=properties)
if not m1_h1_agmt:
log.fatal("Fail to create a master -> hub replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_h1_agmt))

# Creating agreement from hub 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host,
> port=consumer1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:373:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe6b66d0>
func = <built-in method result4 of LDAP object at 0x29020a8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}YpL0PVDq7Zg0Dm7vJot02ClaWksQ0KrXI6JU5XNR39qiXoEL5109Zb7yD9IsWbhpE70jSCE0o/wz6tJaZPFiSB6YT8MhYAnj INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}vswE7kHzj2gZnFL8gRoq3CEan4lkxJoFFd80dUMmx9R1wCyWUGljkBJxmApvsDUo092+BGub2TnNd9uhlmnsuUzKHp4tHGM7 INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}ttKVQnpCmIZzy4IuLlS5vi0v5TCvpr3IvXYbeGtji4kRYt4PAuYrOkWuUqhfsBedz6rLq65gn9Ef1qLtdepexKqI/GXy45lZ
Error tickets/ticket48342_test.py::test_ticket4026::setup 8.62
request = <SubRequest 'topology_m3' for <Function 'test_ticket4026'>>

@pytest.fixture(scope="module")
def topology_m3(request):
"""Create Replication Deployment with three masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
else:
master1.delete()
master2.delete()
master3.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_{}:{}'.format(master3.host, str(master3.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:610:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe6b5190>
func = <built-in method result4 of LDAP object at 0x34a6468>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}jVGzpKg2U7m5BDITZuFxbYyQsIV6SlAGx/Fd6cKCcec3hvoTd8grYZFg7fGWZwmo9aBS9FCQWUfbaFE/2VtD1lkjVA+rE5b4 INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}Onp4Z11oht6jnf/GI47n9YvWzHzpNXgvgLxnOW7Z1mwnI0knrtWORYR8h841WDEbezlvMZXokvlSC3ua+SPb8v3IbXYwAAkc INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}Xf5POO8uKS5I3wnnkWnuKBQ2YR3uvlqAeiYwBOhjhqkN4EH1gdt9OGbx6up7Ol5R/EuVZCvdKPep01w4f/QwZ+Jud/NFomya
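The topology_m3 fixture above repeats the same create-agreement block once per ordered pair of masters. Purely as an illustration (this is not the captured lib389 code), the same full mesh can be written as a loop; master1/master2/master3, the RA_* constants, defaultProperties, SUFFIX and log are assumed to be the objects the fixture already defines:

import itertools
import sys

masters = [master1, master2, master3]  # assumed: the instances the fixture created

for src, dst in itertools.permutations(masters, 2):
    # One agreement per ordered (supplier, consumer) pair -- six in total.
    properties = {RA_NAME: 'meTo_{}:{}'.format(dst.host, str(dst.port)),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    agmt = src.agreement.create(suffix=SUFFIX, host=dst.host,
                                port=dst.port, properties=properties)
    if not agmt:
        log.fatal("Failed to create agreement {}:{} -> {}:{}".format(
                  src.host, src.port, dst.host, dst.port))
        sys.exit(1)
    log.debug("{} created".format(agmt))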
Error tickets/ticket48362_test.py::test_ticket48362::setup 5.68
request = <SubRequest 'topology_m2' for <Function 'test_ticket48362'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8cf06d0>
func = <built-in method result4 of LDAP object at 0x2943b48>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}AMjI2xwGqFG/DEeXu+rra9mSchTpn45gRJSf2d/ziNhK8AwAhkF1J7FGgJvL8NoM027YYAio5Rs+RfEesWnPYDW1rVh280x3 INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}bLKNZhH+fBgsOhwyGTaAbvUux5yU7kUsZSaclktkj28rYAwUw7M/dcODOV9Op123L2Sll770YAUu2fL83Vk/yGCo++M61kXS
Error tickets/ticket48755_test.py::test_ticket48755::setup 5.68
request = <SubRequest 'topology_m2' for <Function 'test_ticket48755'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xea992d0>
func = <built-in method result4 of LDAP object at 0x2902b20>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}DmrzlFMrzOr6xRtbID2LaFIXJtQNzWO6gs7UpFmznVBMTvAIPx0DgNcKL0/15g3/DHTxy5YeaJXdo5Uiuig2RL5sqCvatPtN INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}TUMBbRSZaGIu2fLHefdAiR1ZeV0KavLUUldQ++1Gyfq1cMtXaabfN1KCJx9lNArNZeebaGAf/Y+Eqp5jki9NaU2BySSak0Ff
Error tickets/ticket48784_test.py::test_ticket48784::setup 5.73
request = <SubRequest 'topology_m2' for <Function 'test_ticket48784'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xeaa0d50>
func = <built-in method result4 of LDAP object at 0x2902f58>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}FmlSL/MALHmAHGxMY8zo7c01HeV7X9VjpWkSUVLnHIirRFFv4LLPFJHwnd+0/cS8zSjKCqgmpCqpYCOOquqY7QQ9L4klEXXS INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}U7R6XCbKMYe28pxZebUEPVIVL70LT3lrHnepwSWHCADjOWi3ciW4AN2YFPI6eJO8YSwR+R+reGfmI+4wywMUv7q45QHMXXti
Error tickets/ticket48799_test.py::test_ticket48799::setup 26.85
request = <SubRequest 'topology_m1c1' for <Function 'test_ticket48799'>>

@pytest.fixture(scope="module")
def topology_m1c1(request):
"""Create Replication Deployment with one master and one consumer"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating consumer 1...
if DEBUGGING:
consumer1 = DirSrv(verbose=True)
else:
consumer1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_CONSUMER_1
args_instance[SER_PORT] = PORT_CONSUMER_1
args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_consumer = args_instance.copy()
consumer1.allocate(args_consumer)
instance_consumer1 = consumer1.exists()
if instance_consumer1:
consumer1.delete()
consumer1.create()
consumer1.open()
consumer1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER,
replicaId=CONSUMER_REPLICAID)

def fin():
if DEBUGGING:
master1.stop()
consumer1.stop()
else:
master1.delete()
consumer1.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to consumer 1
properties = {RA_NAME: 'meTo_{}:{}'.format(consumer1.host, str(consumer1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_c1_agmt = master1.agreement.create(suffix=SUFFIX, host=consumer1.host,
port=consumer1.port, properties=properties)
if not m1_c1_agmt:
log.fatal("Fail to create a hub -> consumer replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_c1_agmt))

# Allow the replicas to get situated with the new agreements...
time.sleep(5)

# Initialize all the agreements
master1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
master1.waitForReplInit(m1_c1_agmt)

# Check replication is working...
if master1.testReplication(DEFAULT_SUFFIX, consumer1):
log.info('Replication is working.')
else:
log.fatal('Replication is not working.')
> assert False
E AssertionError

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:270: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists ('Update failed: status', "-1 - LDAP error: Can't contact LDAP server")
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}ttdY/zRdixo06757K2k78WUGK8d+nO4bqhBNseR07wII2rhwvAs5jc/AmkBW0uzXz9WCmWKyeTu3IQDDSMsUQX27nuqMyTId INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}guRuJOWTDV2ZL+TCmQEtFtDYzdCMGrUlkG+ynmfA7aRGNipUIWMDYEIRol3TrisuYA7Bl1wOtWzf2ad4qU2TaKq9E4ybNo64 INFO:lib389:Starting total init cn=meTo_localhost:39201,cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config CRITICAL:lib389:testReplication() failed to modify (dc=example,dc=com), error ({'desc': "Can't contact LDAP server"}) CRITICAL:lib389.topologies:Replication is not working.
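The ticket48799 setup differs from the rest: it gets past agreement creation, and the failure only surfaces later, when waitForReplInit() reports "Update failed" and testReplication() cannot modify dc=example,dc=com ("Can't contact LDAP server" again). A sketch of the same convergence check done with plain python-ldap, polling the consumer instead of relying only on a fixed sleep; the URIs, bind DN and password below are placeholder assumptions standing in for the topology's real connection details:

import time
import ldap

# Placeholder connection details -- substitute the run's real supplier and
# consumer URIs and the directory manager credentials.
SUPPLIER_URI = "ldap://localhost:39001"
CONSUMER_URI = "ldap://localhost:39201"
BIND_DN = "cn=Directory Manager"
BIND_PW = "password"
SUFFIX = "dc=example,dc=com"

def wait_for_replication(timeout=30):
    """Write a marker on the supplier, then poll the consumer until it appears."""
    marker = "repl-check-{}".format(int(time.time()))

    supplier = ldap.initialize(SUPPLIER_URI)
    supplier.simple_bind_s(BIND_DN, BIND_PW)
    # 'description' is simply a writable attribute on the suffix entry; plain str
    # values are fine on the report's Python 2.7 (python-ldap on Python 3 wants bytes).
    supplier.modify_s(SUFFIX, [(ldap.MOD_REPLACE, 'description', [marker])])
    supplier.unbind_s()

    consumer = ldap.initialize(CONSUMER_URI)
    consumer.simple_bind_s(BIND_DN, BIND_PW)
    deadline = time.time() + timeout
    try:
        while time.time() < deadline:
            dn, attrs = consumer.search_s(SUFFIX, ldap.SCOPE_BASE,
                                          '(objectClass=*)', ['description'])[0]
            if marker in attrs.get('description', []):
                return True
            time.sleep(1)
        return False
    finally:
        consumer.unbind_s()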
Error tickets/ticket48916_test.py::test_ticket48916::setup 5.71
request = <SubRequest 'topology_m2' for <Function 'test_ticket48916'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8c21050>
func = <built-in method result4 of LDAP object at 0x34a63c8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}Zu6JREq+ccgKOBBA+i6ICVXPgP0e9Bbt/X1Y0PrPMDkEOMOgp/PRUZGVDgDnyQ+Zto9U7Xs9UIxTcGlo9IGS0cQv41fi8GX7 INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}lSpQvtT4USLST+zvdnAHP2feK/9fcGJoNLZJ8oAcyi0rY9jAcWVMlYr65+yN3pyUJrB0JPlv9qMQ8kGe6u960XdgYGbC7zkh
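Note on the repeated setup errors in this report: each fixture gets as far as creating the second replication agreement and then python-ldap raises SERVER_DOWN, meaning the just-created instance is no longer reachable. A minimal sketch, not part of the test run and with an illustrative URI and timings, of how one might probe an instance before proceeding:

import time

import ldap


def wait_for_ldap(uri, retries=10, delay=2):
    """Return True once an anonymous bind to the root DSE succeeds."""
    for _ in range(retries):
        conn = ldap.initialize(uri)
        try:
            conn.simple_bind_s()      # anonymous bind; raises SERVER_DOWN if unreachable
            conn.unbind_s()
            return True
        except ldap.SERVER_DOWN:
            time.sleep(delay)         # server not up yet, retry after a pause
    return False


# Hypothetical usage before creating the agreements:
# assert wait_for_ldap('ldap://localhost:38901'), "master1 never came back up"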
Error tickets/ticket49008_test.py::test_ticket49008::setup 8.58
request = <SubRequest 'T' for <Function 'test_ticket49008'>>

@pytest.fixture(scope="module")
def topology_m3(request):
"""Create Replication Deployment with three masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
else:
master1.delete()
master2.delete()
master3.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_{}:{}'.format(master3.host, str(master3.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:610:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8c4e850>
func = <built-in method result4 of LDAP object at 0x31ad300>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}nP+s9+MSymU0w0cAR9w21mgliVAxbRF3jxEcQIMoxDXPJi8uS6fPt6VKtyEdoBopvxkLgEcjIwlSgQ8LGqj3pi1xtPflbzFd
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}N0ily+Zpk2CUd8M/UdSdkkQ89Ts/QIIcnYw1eEtGnVLiN4uffv8o+1oDY6VDZZbz9sm6gUctqexqjXcrUOb26iGfYi14KDQk
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}T6OaMftrYcy3mOfdH3eOnv84U0UUVcosnO8VvRLruJULQ6ECBzj4HHvXTLhlOuVU7kME3nms4JEw5sHqv05ZfRO/fzWNAPi4
Error tickets/ticket49020_test.py::test_ticket49020::setup 8.59
request = <SubRequest 'T' for <Function 'test_ticket49020'>>

@pytest.fixture(scope="module")
def topology_m3(request):
"""Create Replication Deployment with three masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

# Creating master 3...
if DEBUGGING:
master3 = DirSrv(verbose=True)
else:
master3 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_3
args_instance[SER_PORT] = PORT_MASTER_3
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master3.allocate(args_master)
instance_master3 = master3.exists()
if instance_master3:
master3.delete()
master3.create()
master3.open()
master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_3)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
master3.stop()
else:
master1.delete()
master2.delete()
master3.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 1 to master 3
properties = {RA_NAME: 'meTo_{}:{}'.format(master3.host, str(master3.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
port=master3.port, properties=properties)
if not m1_m3_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m3_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:610:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8fa3bd0>
func = <built-in method result4 of LDAP object at 0x2b43030>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}P0W+VOXGD3N327ebvgkIJ66Ba/fyzeozvgT0eGDwyyDInvdTv1D01UXrRbUGsZ1dgVRZOgaxwKoYRQjoSXjJcq31UNIGEQrd
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}7+PRQHUkVhH4P8zG7fUebPpJMeeI+pcQBkS1DWKwgz1jFim704AnqSZWxJyMsYSox6pyoNOQQ971tBIYK7J3dTQcYOQsOwji
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}GbbKpEmtcssvQjW/6QqXWr7SR8gC9RAxwAtBU81rZjybwA5NGIOSqwMtU9XC0rRcXkPXzZKoj/SJ2lhiYqekkbHNLC+VcQoF
Error tickets/ticket49073_test.py::test_ticket49073::setup 5.72
request = <SubRequest 'topology_m2' for <Function 'test_ticket49073'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8b45c10>
func = <built-in method result4 of LDAP object at 0x1dbf9b8>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}jAxOSUQKo1ePibtX5XBGZwVK4Gs51PfkMQ3k6Fkl/K6nHP691gMNCCpt2k4KLdVcRVpkX/N4q3QtWCFYkuzQS3EpUX+dLH7r
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}AMwt0iZrSrxIJcbaBBFE2T/G5jftJwfwrCzfpGzDySlML4jUBAAyJwoVnLKsm9BXoogvNMVH1aH2rcTb4iRJz7sgtl6f32hx
Error tickets/ticket49121_test.py::test_ticket49121::setup 5.71
request = <SubRequest 'topology_m2' for <Function 'test_ticket49121'>>

@pytest.fixture(scope="module")
def topology_m2(request):
"""Create Replication Deployment with two masters"""

# Creating master 1...
if DEBUGGING:
master1 = DirSrv(verbose=True)
else:
master1 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_1
args_instance[SER_PORT] = PORT_MASTER_1
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master1.allocate(args_master)
instance_master1 = master1.exists()
if instance_master1:
master1.delete()
master1.create()
master1.open()
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_1)

# Creating master 2...
if DEBUGGING:
master2 = DirSrv(verbose=True)
else:
master2 = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_MASTER_2
args_instance[SER_PORT] = PORT_MASTER_2
args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
args_master = args_instance.copy()
master2.allocate(args_master)
instance_master2 = master2.exists()
if instance_master2:
master2.delete()
master2.create()
master2.open()
master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
replicaId=REPLICAID_MASTER_2)

def fin():
if DEBUGGING:
master1.stop()
master2.stop()
else:
master1.delete()
master2.delete()

request.addfinalizer(fin)

# Create all the agreements
# Creating agreement from master 1 to master 2
properties = {RA_NAME: 'meTo_{}:{}'.format(master2.host, str(master2.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
port=master2.port, properties=properties)
if not m1_m2_agmt:
log.fatal("Fail to create a master -> master replica agreement")
sys.exit(1)
log.debug("{} created".format(m1_m2_agmt))

# Creating agreement from master 2 to master 1
properties = {RA_NAME: 'meTo_{}:{}'.format(master1.host, str(master1.port)),
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
> port=master1.port, properties=properties)

/mnt/tests/rhds/tests/upstream/src/lib389/lib389/topologies.py:476:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/agreement.py:454: in create
replica_entries = self.conn.replica.list(suffix)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/replica.py:161: in list
ents = self.conn.search_s(base, ldap.SCOPE_SUBTREE, filtr)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:597: in search_s
return self.search_ext_s(base,scope,filterstr,attrlist,attrsonly,None,None,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:591: in search_ext_s
return self.result(msgid,all=1,timeout=timeout)[1]
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe82ced0>
func = <built-in method result4 of LDAP object at 0x34a6940>
args = (10, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}k6R5jHAlaNZlrM9ZVwgTqEcMwepLItd7CchnDp9msfrRp3h86mXsnoNbAe812ry4xWnjzM/ihWmGh0NoK2SEL6kxB68loVdn
INFO:lib389:List backend with suffix=dc=example,dc=com
INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}Fd+iJiQqlkoc+C1mnGPJiqEM8Mx0Llu3/KTtV/yS1/fpNoc+0PUWHCszRo60Vv68kQDmXPTqXANI534FGxLAE0+2wO5mkfZC
Failed suites/basic/basic_test.py::test_basic_acl 0.02
topology_st = <lib389.topologies.TopologyMain object at 0x5adcf50>
import_example_ldif = None

def test_basic_acl(topology_st, import_example_ldif):
"""Run some basic access control(ACL) tests"""

log.info('Running test_basic_acl...')

DENY_ACI = ('(targetattr = "*") (version 3.0;acl "deny user";deny (all)' +
'(userdn = "ldap:///' + USER1_DN + '");)')

#
# Add two users
#
try:
topology_st.standalone.add_s(Entry((USER1_DN,
{'objectclass': "top extensibleObject".split(),
'sn': '1',
'cn': 'user 1',
'uid': 'user1',
'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN
+ ': error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((USER2_DN,
{'objectclass': "top extensibleObject".split(),
'sn': '2',
'cn': 'user 2',
'uid': 'user2',
'userpassword': PASSWORD})))
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to add test user ' + USER2_DN
+ ': error ' + e.message['desc'])
assert False

#
# Add an aci that denies USER1 from doing anything,
# and also set the default anonymous access
#
try:
topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)])
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.message['desc'])
assert False

#
# Make sure USER1_DN cannot search anything, but USER2_DN can...
#
try:
topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.message['desc'])
> assert False
E assert False

suites/basic/basic_test.py:369: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_acl...
CRITICAL:tests.suites.basic.basic_test:test_basic_acl: Failed to bind as user1, error: Can't contact LDAP server
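For reference, the check the failed assertion above was meant to make (the bind itself died with SERVER_DOWN before the ACI could matter): with the deny-all ACI applied, a search bound as user1 should return nothing while the same search bound as user2 returns entries. A minimal sketch under those assumptions, with the DNs and suffix passed in as parameters rather than taken from the test:

import ldap


def check_deny_aci(uri, user1_dn, user2_dn, password, suffix):
    """Sketch of the intended behaviour: user1 is blinded by the ACI, user2 is not."""
    conn = ldap.initialize(uri)
    conn.simple_bind_s(user1_dn, password)
    denied = conn.search_s(suffix, ldap.SCOPE_SUBTREE, '(uid=*)')
    conn.unbind_s()

    conn = ldap.initialize(uri)
    conn.simple_bind_s(user2_dn, password)
    allowed = conn.search_s(suffix, ldap.SCOPE_SUBTREE, '(uid=*)')
    conn.unbind_s()

    # deny (all) hides every entry from user1; user2 still sees them
    return len(denied) == 0 and len(allowed) > 0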
Failed suites/basic/basic_test.py::test_basic_searches 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x5adcf50>
import_example_ldif = None

def test_basic_searches(topology_st, import_example_ldif):
"""The search results are gathered from testing with Example.ldif"""

log.info('Running test_basic_searches...')

filters = (('(uid=scarter)', 1),
('(uid=tmorris*)', 1),
('(uid=*hunt*)', 4),
('(uid=*cope)', 2),
('(mail=*)', 150),
('(roomnumber>=4000)', 35),
('(roomnumber<=4000)', 115),
('(&(roomnumber>=4000)(roomnumber<=4500))', 18),
('(!(l=sunnyvale))', 120),
('(&(uid=t*)(l=santa clara))', 7),
('(|(uid=k*)(uid=r*))', 18),
('(|(uid=t*)(l=sunnyvale))', 50),
('(&(!(uid=r*))(ou=people))', 139),
('(&(uid=m*)(l=sunnyvale)(ou=people)(mail=*example*)(roomNumber=*))', 3),
('(&(|(uid=m*)(l=santa clara))(roomNumber=22*))', 5),
('(&(|(uid=m*)(l=santa clara))(roomNumber=22*)(!(roomnumber=2254)))', 4))

for (search_filter, search_result) in filters:
try:
entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
ldap.SCOPE_SUBTREE,
search_filter)
if len(entries) != search_result:
log.fatal('test_basic_searches: An incorrect number of entries\
was returned from filter (%s): (%d) expected (%d)' %
(search_filter, len(entries), search_result))
assert False
except ldap.LDAPError as e:
log.fatal('Search failed: ' + e.message['desc'])
> assert False
E assert False

suites/basic/basic_test.py:481: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_searches...
CRITICAL:tests.suites.basic.basic_test:Search failed: Can't contact LDAP server
Failed suites/basic/basic_test.py::test_basic_referrals 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x5adcf50>
import_example_ldif = None

def test_basic_referrals(topology_st, import_example_ldif):
"""Set the server to referral mode,
and make sure we receive the referral error (10)
"""

log.info('Running test_basic_referrals...')

SUFFIX_CONFIG = 'cn="dc=example,dc=com",cn=mapping tree,cn=config'

#
# Set the referral, and the backend state
#
try:
topology_st.standalone.modify_s(SUFFIX_CONFIG,
[(ldap.MOD_REPLACE,
'nsslapd-referral',
'ldap://localhost.localdomain:389/o%3dnetscaperoot')])
except ldap.LDAPError as e:
log.fatal('test_basic_referrals: Failed to set referral: error ' + e.message['desc'])
> assert False
E assert False

suites/basic/basic_test.py:505: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_referrals...
CRITICAL:tests.suites.basic.basic_test:test_basic_referrals: Failed to set referral: error Can't contact LDAP server
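The docstring above expects LDAP result code 10 (referral) once the backend is put into referral mode. A minimal sketch, assuming a client that does not chase referrals automatically, of how that result surfaces in python-ldap:

import ldap


def expect_referral(uri, suffix):
    """Return True if a subtree search comes back as a referral (result code 10)."""
    conn = ldap.initialize(uri)
    conn.set_option(ldap.OPT_REFERRALS, 0)   # do not follow referrals automatically
    conn.simple_bind_s()
    try:
        conn.search_s(suffix, ldap.SCOPE_SUBTREE, '(objectclass=*)')
        return False                          # data came back, no referral
    except ldap.REFERRAL:
        return True                           # the referral error the test looks for
    finally:
        conn.unbind_s()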
Failed suites/dynamic_plugins/dynamic_plugins_test.py::test_dynamic_plugins 0.04
topology_st = <lib389.topologies.TopologyMain object at 0x5cd1550>

def test_dynamic_plugins(topology_st):
"""
Test Dynamic Plugins - exercise each plugin and its main features, while
changing the configuration without restarting the server.

Need to test: functionality, stability, and stress. These tests need to run
with replication disabled, and with replication set up with a
second instance. Then test that replication is working and that both
sides have the same entries.

Functionality - Make sure that as configuration changes are made they take
effect immediately. Cross plugin interaction (e.g. automember/memberOf)
needs to be tested, as well as plugin tasks. Need to test plugin
config validation (dependencies, etc.).

Memory Corruption - Restart the plugins many times, and in different orders and test
functionality and stability. This will exercise the internal
plugin linked lists, dse callbacks, and task handlers.

Stress - Put the server under load that will trigger multiple plugins (MO, RI, DNA, etc.).
Restart various plugins while these operations are going on. Perform this test
5 times (stress_max_runs).
"""

REPLICA_PORT = 33334
RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
master_maxcsn = 0
replica_maxcsn = 0
msg = ' (no replication)'
replication_run = False
stress_max_runs = 5

# First enable dynamic plugins
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
except ldap.LDAPError as e:
log.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
assert False

# Test that critical plugins can be updated even though the change might not be applied
try:
topology_st.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])
except ldap.LDAPError as e:
log.fatal('Failed to apply change to critical plugin' + e.message['desc'])
assert False

while 1:
#
# First run the tests with replication disabled, then rerun them with replication set up
#

############################################################################
# Test plugin functionality
############################################################################

log.info('####################################################################')
log.info('Testing Dynamic Plugins Functionality' + msg + '...')
log.info('####################################################################\n')

> plugin_tests.test_all_plugins(topology_st.standalone)

suites/dynamic_plugins/dynamic_plugins_test.py:93:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
suites/dynamic_plugins/plugin_tests.py:2477: in test_all_plugins
func(inst, args)
suites/dynamic_plugins/plugin_tests.py:125: in test_acctpolicy
inst.plugins.enable(name=PLUGIN_ACCT_POLICY)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/plugins.py:170: in enable
plugin.enable()
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/plugins.py:40: in enable
self.set('nsslapd-pluginEnabled', 'on')
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/_mapped_object.py:193: in set
return self._instance.modify_s(self._dn, [(action, key, value)])
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:402: in modify_s
return self.result(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x5cd3210>
func = <built-in method result4 of LDAP object at 0x295ad00>
args = (7, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.dynamic_plugins.dynamic_plugins_test:####################################################################
INFO:tests.suites.dynamic_plugins.dynamic_plugins_test:Testing Dynamic Plugins Functionality (no replication)...
INFO:tests.suites.dynamic_plugins.dynamic_plugins_test:####################################################################
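The mechanism this test exercises, stripped to its core: with nsslapd-dynamic-plugins enabled in cn=config, flipping a plugin's nsslapd-pluginEnabled attribute takes effect without a restart. A minimal sketch, not the test's own code, with the MemberOf plugin DN used only as an example:

import ldap


def toggle_plugin(uri, root_dn, root_pw,
                  plugin_dn='cn=MemberOf Plugin,cn=plugins,cn=config'):
    conn = ldap.initialize(uri)
    conn.simple_bind_s(root_dn, root_pw)
    # allow plugin configuration changes to be picked up on the fly
    conn.modify_s('cn=config',
                  [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    # enable and then disable the plugin; no server restart in between
    conn.modify_s(plugin_dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
    conn.modify_s(plugin_dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'off')])
    conn.unbind_s()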
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[-True-oper_attr_list1] 0.02
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '', search_suffix = ''
regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'modifiersName', 'modifyTimestamp', 'namingContexts', 'nsBackendSuffix', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and with additionally
'objectClass' and '*' attrs too
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method result4 of LDAP object at 0x2967558>
args = (11, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
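What the '+' in the docstring refers to: RFC 3673 defines '+' as a special attribute selection that asks the server for all operational attributes; combined with '*' the user attributes come back as well. A minimal sketch of the request the test performs once the bind succeeds (the parameters here are placeholders):

import ldap


def operational_attrs(uri, bind_dn, bind_pw, base):
    """Return the attribute names of `base`, operational ones included."""
    conn = ldap.initialize(uri)
    conn.simple_bind_s(bind_dn, bind_pw)
    # '+' alone -> operational attributes only; '*' adds the user attributes
    dn, attrs = conn.search_s(base, ldap.SCOPE_BASE,
                              '(objectclass=*)', ['*', '+'])[0]
    conn.unbind_s()
    return sorted(attrs.keys())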
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[-True-oper_attr_list1-*] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '*', search_suffix = ''
regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'modifiersName', 'modifyTimestamp', 'namingContexts', 'nsBackendSuffix', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and with additionally
'objectClass' and '*' attrs too
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[-True-oper_attr_list1-objectClass] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = 'objectClass', search_suffix = ''
regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'modifiersName', 'modifyTimestamp', 'namingContexts', 'nsBackendSuffix', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and with additionally
'objectClass' and '*' attrs too
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[ou=people,dc=example,dc=com-False-oper_attr_list2] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = ''
search_suffix = 'ou=people,dc=example,dc=com', regular_user = False
oper_attr_list = ['aci', 'createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifiersName', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and with additionally
'objectClass' and '*' attrs too
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[ou=people,dc=example,dc=com-False-oper_attr_list2-*] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '*'
search_suffix = 'ou=people,dc=example,dc=com', regular_user = False
oper_attr_list = ['aci', 'createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifiersName', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and with additionally
'objectClass' and '*' attrs too
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[ou=people,dc=example,dc=com-False-oper_attr_list2-objectClass] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = 'objectClass'
search_suffix = 'ou=people,dc=example,dc=com', regular_user = False
oper_attr_list = ['aci', 'createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifiersName', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and with additionally
'objectClass' and '*' attrs too
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[ou=people,dc=example,dc=com-True-oper_attr_list3] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = ''
search_suffix = 'ou=people,dc=example,dc=com', regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifyTimestamp', 'nsUniqueId', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[ou=people,dc=example,dc=com-True-oper_attr_list3-*] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '*'
search_suffix = 'ou=people,dc=example,dc=com', regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifyTimestamp', 'nsUniqueId', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[ou=people,dc=example,dc=com-True-oper_attr_list3-objectClass] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = 'objectClass'
search_suffix = 'ou=people,dc=example,dc=com', regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifyTimestamp', 'nsUniqueId', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[uid=all_attrs_test,ou=people,dc=example,dc=com-False-oper_attr_list4] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = ''
search_suffix = 'uid=all_attrs_test,ou=people,dc=example,dc=com'
regular_user = False
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifiersName', 'modifyTimestamp', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[uid=all_attrs_test,ou=people,dc=example,dc=com-False-oper_attr_list4-*] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '*'
search_suffix = 'uid=all_attrs_test,ou=people,dc=example,dc=com'
regular_user = False
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifiersName', 'modifyTimestamp', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[uid=all_attrs_test,ou=people,dc=example,dc=com-False-oper_attr_list4-objectClass] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = 'objectClass'
search_suffix = 'uid=all_attrs_test,ou=people,dc=example,dc=com'
regular_user = False
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifiersName', 'modifyTimestamp', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[uid=all_attrs_test,ou=people,dc=example,dc=com-True-oper_attr_list5] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = ''
search_suffix = 'uid=all_attrs_test,ou=people,dc=example,dc=com'
regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifyTimestamp', 'nsUniqueId', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[uid=all_attrs_test,ou=people,dc=example,dc=com-True-oper_attr_list5-*] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '*'
search_suffix = 'uid=all_attrs_test,ou=people,dc=example,dc=com'
regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifyTimestamp', 'nsUniqueId', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[uid=all_attrs_test,ou=people,dc=example,dc=com-True-oper_attr_list5-objectClass] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = 'objectClass'
search_suffix = 'uid=all_attrs_test,ou=people,dc=example,dc=com'
regular_user = True
oper_attr_list = ['createTimestamp', 'creatorsName', 'entrydn', 'entryid', 'modifyTimestamp', 'nsUniqueId', ...]

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

suites/filter/rfc3673_all_oper_attrs_test.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('uid=all_attrs_test,ou=people,dc=example,dc=com', 'all_attrs_test', None, None)
kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
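The ':setup:' text above mentions an aci that denies one attribute for the bind user. The report does not show that aci, but a sketch of how such a deny rule can be attached to the suffix with python-ldap might look as follows (the denied attribute, URI and credentials are illustrative assumptions):

    import ldap

    USER_DN = 'uid=all_attrs_test,ou=people,dc=example,dc=com'
    DENIED_ATTR = 'telephoneNumber'   # illustrative; the report does not name the attribute
    ACI_BODY = ('(targetattr = "%s")'
                '(version 3.0; acl "deny %s for %s"; '
                'deny (read, search, compare) userdn = "ldap:///%s";)'
                % (DENIED_ATTR, DENIED_ATTR, USER_DN, USER_DN))

    conn = ldap.initialize('ldap://localhost:389')   # hypothetical URI
    conn.simple_bind_s('cn=Directory Manager', 'password')
    # Adding the aci to the suffix entry makes it apply to the whole subtree.
    conn.modify_s('dc=example,dc=com', [(ldap.MOD_ADD, 'aci', ACI_BODY)])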
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[cn=config-False-oper_attr_list6] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '', search_suffix = 'cn=config'
regular_user = False, oper_attr_list = ['numSubordinates', 'passwordHistory']

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[cn=config-False-oper_attr_list6-*] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = '*', search_suffix = 'cn=config'
regular_user = False, oper_attr_list = ['numSubordinates', 'passwordHistory']

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[cn=config-False-oper_attr_list6-objectClass] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x6423f10>
test_user = None, user_aci = None, add_attr = 'objectClass'
search_suffix = 'cn=config', regular_user = False
oper_attr_list = ['numSubordinates', 'passwordHistory']

@pytest.mark.parametrize('add_attr', ['', '*', 'objectClass'])
@pytest.mark.parametrize('search_suffix,regular_user,oper_attr_list',
TEST_PARAMS)
def test_search_basic(topology_st, test_user, user_aci, add_attr,
search_suffix, regular_user, oper_attr_list):
"""Verify that you can get all expected operational attributes
by a Search Request [RFC2251] with '+' (ASCII 43) filter.
Please see: https://tools.ietf.org/html/rfc3673

:ID: 14c66bc2-28e1-4f5f-893e-508e0f720f8c
:feature: Filter
:setup: Standalone instance, test user for binding,
deny one attribute aci for that user
:steps: 1. Bind as regular user or Directory Manager
2. Search with '+' filter and additionally with
'objectClass' and '*' attrs
:assert: All expected values were returned, not more
"""

if regular_user:
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)
else:
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/filter/rfc3673_all_oper_attrs_test.py:136:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x6423c10>
func = <built-in method simple_bind of LDAP object at 0x2967558>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
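Every failure in this suite raises ldap.SERVER_DOWN from simple_bind_s; python-ldap raises that exception when no connection to the server can be established at all, so none of these binds ever reach the access-control behaviour under test. A small probe that separates "instance not listening" from a genuine bind failure, assuming the same hypothetical ldap://localhost:389 URI:

    import ldap

    def server_is_up(uri='ldap://localhost:389'):   # hypothetical URI
        """Return True if the server answers at all, False if it
        cannot be contacted."""
        conn = ldap.initialize(uri)
        try:
            conn.simple_bind_s('', '')      # anonymous bind
        except ldap.SERVER_DOWN:
            return False
        except ldap.LDAPError:
            return True    # server responded, even if it rejected the bind
        else:
            return True
        finally:
            try:
                conn.unbind_s()
            except ldap.LDAPError:
                pass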
Failed suites/paged_results/paged_results_test.py::test_search_success[6-5] 0.03
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 6, users_num = 5

@pytest.mark.parametrize("page_size,users_num",
[(6, 5), (5, 5), (5, 25)])
def test_search_success(topology_st, test_user, page_size, users_num):
"""Verify that search with a simple paged results control
returns all entries it should without errors.

:id: ddd15b70-64f1-4a85-a793-b24761e50354
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Search through added users with a simple paged control
:assert: All users should be found
"""

users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']

try:
log.info('Set user bind')
topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')

all_results = paged_search(topology_st, DEFAULT_SUFFIX, [req_ctrl],
search_flt, searchreq_attrlist)

log.info('%d results' % len(all_results))
assert len(all_results) == len(users_list)
finally:
log.info('Set Directory Manager bind back (test_search_success)')
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/paged_results/paged_results_test.py:276:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x614f610>
func = <built-in method simple_bind of LDAP object at 0x2b43328>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding user uid=simplepaged_test,dc=example,dc=com
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 5 users
INFO:tests.suites.paged_results.paged_results_test:Set user bind
INFO:tests.suites.paged_results.paged_results_test:Set Directory Manager bind back (test_search_success)
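The paged_search() helper used above is defined elsewhere in the suite and is not reproduced in this report; the usual python-ldap loop for a simple paged results search (RFC 2696), which matches the SimplePagedResultsControl(True, size=page_size, cookie='') call shown in the test, is roughly this sketch (the connection object and base DN are assumptions):

    import ldap
    from ldap.controls import SimplePagedResultsControl

    def paged_search_sketch(conn, base, filterstr, attrlist, page_size=5):
        """Collect all entries using the simple paged results control."""
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        results = []
        while True:
            msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr,
                                    attrlist, serverctrls=[req_ctrl])
            rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
            results.extend(rdata)
            # The server sends the control back with a cookie; an empty
            # cookie means there are no more pages.
            pctrls = [c for c in rctrls
                      if c.controlType == SimplePagedResultsControl.controlType]
            if not pctrls or not pctrls[0].cookie:
                break
            req_ctrl.cookie = pctrls[0].cookie
        return results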
Failed suites/paged_results/paged_results_test.py::test_search_success[5-5] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 5, users_num = 5

@pytest.mark.parametrize("page_size,users_num",
[(6, 5), (5, 5), (5, 25)])
def test_search_success(topology_st, test_user, page_size, users_num):
"""Verify that search with a simple paged results control
returns all entries it should without errors.

:id: ddd15b70-64f1-4a85-a793-b24761e50354
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Search through added users with a simple paged control
:assert: All users should be found
"""

> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:259:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 5, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 5 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00752,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_success[5-25] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 5, users_num = 25

@pytest.mark.parametrize("page_size,users_num",
[(6, 5), (5, 5), (5, 25)])
def test_search_success(topology_st, test_user, page_size, users_num):
"""Verify that search with a simple paged results control
returns all entries it should without errors.

:id: ddd15b70-64f1-4a85-a793-b24761e50354
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Search through added users with a simple paged control
:assert: All users should be found
"""

> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:259:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 25, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 25 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00267,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_limits_fail[50-200-cn=config,cn=ldbm database,cn=plugins,cn=config-nsslapd-idlistscanlimit-100-UNWILLING_TO_PERFORM] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 50, users_num = 200
suffix = 'cn=config,cn=ldbm database,cn=plugins,cn=config'
attr_name = 'nsslapd-idlistscanlimit', attr_value = '100'
expected_err = <class 'ldap.UNWILLING_TO_PERFORM'>

@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [
(50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
ldap.UNWILLING_TO_PERFORM),
(5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
ldap.UNAVAILABLE_CRITICAL_EXTENSION),
(21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
ldap.SIZELIMIT_EXCEEDED),
(21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
ldap.SIZELIMIT_EXCEEDED),
(5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
ldap.ADMINLIMIT_EXCEEDED)])
def test_search_limits_fail(topology_st, test_user, page_size, users_num,
suffix, attr_name, attr_value, expected_err):
"""Verify that search with a simple paged results control
throws expected exceptions when corresponding limits are
exceeded.

:id: e3067107-bd6d-493d-9989-3e641a9337b0
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Set limit attribute to the value that will cause
an expected exception
3. Search through added users with a simple paged control
:assert: Should fail with appropriate exception
"""

> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 200, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 200 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00684,dc=example,dc=com): error (Can't contact LDAP server)
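For the limit cases above, the step the test never reaches is lowering one server limit and then expecting the paged search to fail with the parametrized exception. A standalone sketch of that step for the nsslapd-sizelimit row (URI and credentials are assumptions; on this Python 2 stack the modify value is passed as a plain string):

    import ldap
    from ldap.controls import SimplePagedResultsControl

    conn = ldap.initialize('ldap://localhost:389')   # hypothetical URI
    conn.simple_bind_s('cn=Directory Manager', 'password')

    # Lower the server-wide size limit so a paged search over ~50 test
    # entries should trip SIZELIMIT_EXCEEDED, as the
    # (21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20', ...) row expects.
    conn.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-sizelimit', '20')])

    req_ctrl = SimplePagedResultsControl(True, size=21, cookie='')
    try:
        msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                                '(uid=test*)', ['sn'], serverctrls=[req_ctrl])
        conn.result3(msgid)
    except ldap.SIZELIMIT_EXCEEDED:
        pass   # expected once the lowered limit is in effect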
Failed suites/paged_results/paged_results_test.py::test_search_limits_fail[5-15-cn=config-nsslapd-timelimit-20-UNAVAILABLE_CRITICAL_EXTENSION] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 5, users_num = 15, suffix = 'cn=config'
attr_name = 'nsslapd-timelimit', attr_value = '20'
expected_err = <class 'ldap.UNAVAILABLE_CRITICAL_EXTENSION'>

@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [
(50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
ldap.UNWILLING_TO_PERFORM),
(5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
ldap.UNAVAILABLE_CRITICAL_EXTENSION),
(21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
ldap.SIZELIMIT_EXCEEDED),
(21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
ldap.SIZELIMIT_EXCEEDED),
(5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
ldap.ADMINLIMIT_EXCEEDED)])
def test_search_limits_fail(topology_st, test_user, page_size, users_num,
suffix, attr_name, attr_value, expected_err):
"""Verify that search with a simple paged results control
throws expected exceptions when corresponding limits are
exceeded.

:id: e3067107-bd6d-493d-9989-3e641a9337b0
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Set limit attribute to the value that will cause
an expected exception
3. Search through added users with a simple paged control
:assert: Should fail with appropriate exception
"""

> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 15, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 15 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00499,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_limits_fail[21-50-cn=config-nsslapd-sizelimit-20-SIZELIMIT_EXCEEDED] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 21, users_num = 50, suffix = 'cn=config'
attr_name = 'nsslapd-sizelimit', attr_value = '20'
expected_err = <class 'ldap.SIZELIMIT_EXCEEDED'>

@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [
(50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
ldap.UNWILLING_TO_PERFORM),
(5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
ldap.UNAVAILABLE_CRITICAL_EXTENSION),
(21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
ldap.SIZELIMIT_EXCEEDED),
(21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
ldap.SIZELIMIT_EXCEEDED),
(5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
ldap.ADMINLIMIT_EXCEEDED)])
def test_search_limits_fail(topology_st, test_user, page_size, users_num,
suffix, attr_name, attr_value, expected_err):
"""Verify that search with a simple paged results control
throws expected exceptions when corresponding limits are
exceeded.

:id: e3067107-bd6d-493d-9989-3e641a9337b0
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Set limit attribute to the value that will cause
an expected exception
3. Search through added users with a simple paged control
:assert: Should fail with appropriate exception
"""

> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 50, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 50 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00604,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_limits_fail[21-50-cn=config-nsslapd-pagedsizelimit-5-SIZELIMIT_EXCEEDED] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 21, users_num = 50, suffix = 'cn=config'
attr_name = 'nsslapd-pagedsizelimit', attr_value = '5'
expected_err = <class 'ldap.SIZELIMIT_EXCEEDED'>

@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [
(50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
ldap.UNWILLING_TO_PERFORM),
(5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
ldap.UNAVAILABLE_CRITICAL_EXTENSION),
(21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
ldap.SIZELIMIT_EXCEEDED),
(21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
ldap.SIZELIMIT_EXCEEDED),
(5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
ldap.ADMINLIMIT_EXCEEDED)])
def test_search_limits_fail(topology_st, test_user, page_size, users_num,
suffix, attr_name, attr_value, expected_err):
"""Verify that search with a simple paged results control
throws expected exceptoins when corresponding limits are
exceeded.

:id: e3067107-bd6d-493d-9989-3e641a9337b0
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Set limit attribute to the value that will cause
an expected exception
3. Search through added users with a simple paged control
:assert: Should fail with appropriate exception
"""

> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 50, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 50 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00858,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_limits_fail[5-50-cn=config,cn=ldbm database,cn=plugins,cn=config-nsslapd-lookthroughlimit-20-ADMINLIMIT_EXCEEDED] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, page_size = 5, users_num = 50
suffix = 'cn=config,cn=ldbm database,cn=plugins,cn=config'
attr_name = 'nsslapd-lookthroughlimit', attr_value = '20'
expected_err = <class 'ldap.ADMINLIMIT_EXCEEDED'>

@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [
(50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
ldap.UNWILLING_TO_PERFORM),
(5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
ldap.UNAVAILABLE_CRITICAL_EXTENSION),
(21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
ldap.SIZELIMIT_EXCEEDED),
(21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
ldap.SIZELIMIT_EXCEEDED),
(5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
ldap.ADMINLIMIT_EXCEEDED)])
def test_search_limits_fail(topology_st, test_user, page_size, users_num,
suffix, attr_name, attr_value, expected_err):
"""Verify that search with a simple paged results control
throws expected exceptoins when corresponding limits are
exceeded.

:id: e3067107-bd6d-493d-9989-3e641a9337b0
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Set limit attribute to the value that will cause
an expected exception
3. Search through added users with a simple paged control
:assert: Should fail with appropriate exception
"""

> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:308:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 50, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 50 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00761,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_sort_success 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None

def test_search_sort_success(topology_st, test_user):
"""Verify that search with a simple paged results control
and a server side sort control returns all entries
it should without errors.

:id: 17d8b150-ed43-41e1-b80f-ee9b4ce45155
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Search through added users with a simple paged control
and a server side sort control
:assert: All users should be found and sorted
"""

users_num = 50
page_size = 5
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:392:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 50, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 50 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00511,dc=example,dc=com): error (Can't contact LDAP server)
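For context, a minimal sketch of combining the server-side sort control with simple paged results, which is what test_search_sort_success exercises; it assumes a python-ldap build that ships ldap.controls.sss, and the connection details are illustrative.

import ldap
from ldap.controls import SimplePagedResultsControl
from ldap.controls.sss import SSSRequestControl

conn = ldap.initialize('ldap://localhost:389')  # assumed URI
conn.simple_bind_s('uid=simplepaged_test,dc=example,dc=com', 'secret')

sort_ctrl = SSSRequestControl(ordering_rules=['sn'])            # sort by sn
page_ctrl = SimplePagedResultsControl(True, size=5, cookie='')
msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        '(uid=test*)', serverctrls=[page_ctrl, sort_ctrl])
rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
# rdata holds the first sorted page; the paged control returned in rctrls
# carries the cookie needed to request the next page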
Failed suites/paged_results/paged_results_test.py::test_search_abandon 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None

def test_search_abandon(topology_st, test_user):
"""Verify that search with simple paged results control
can be abandon

:id: 0008538b-7585-4356-839f-268828066978
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Search through added users with a simple paged control
3. Abandon the search
:assert: It will throw an ldap.TIMEOUT exception, while trying
to get the rest of the search results
"""

users_num = 10
page_size = 2
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:437:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 10, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 10 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00776,dc=example,dc=com): error (Can't contact LDAP server)
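A minimal sketch of the abandon scenario this test targets: start a paged search, abandon it, then show that waiting for further results times out. Connection details are assumed.

import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://localhost:389')  # assumed URI
conn.simple_bind_s('uid=simplepaged_test,dc=example,dc=com', 'secret')

ctrl = SimplePagedResultsControl(True, size=2, cookie='')
msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        '(uid=test*)', serverctrls=[ctrl])
conn.abandon(msgid)                 # give up on the paged search
try:
    conn.result(msgid, timeout=5)   # no result should ever arrive
except ldap.TIMEOUT:
    print('abandoned search timed out as expected')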
Failed suites/paged_results/paged_results_test.py::test_search_with_timelimit 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None

def test_search_with_timelimit(topology_st, test_user):
"""Verify that after performing multiple simple paged searches
to completion, each with a timelimit, it wouldn't fail, if we sleep
for a time more than the timelimit.

:id: 6cd7234b-136c-419f-bf3e-43aa73592cff
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Search through added users with a simple paged control
and timelimit set to 5
3. When the returned cookie is empty, wait 10 seconds
4. Perform steps 2 and 3 three times in a row
:assert: No error happens
"""

users_num = 100
page_size = 50
timelimit = 5
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:487:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 100, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 100 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00239,dc=example,dc=com): error (Can't contact LDAP server)
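A minimal sketch of the time-limit scenario: each paged search is issued with an operation time limit, the pages are drained, and the client then sleeps longer than that limit before searching again; no error is expected. Connection details are assumed.

import ldap
import time
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://localhost:389')  # assumed URI
conn.simple_bind_s('uid=simplepaged_test,dc=example,dc=com', 'secret')

for _ in range(3):
    cookie = ''
    while True:
        ctrl = SimplePagedResultsControl(True, size=50, cookie=cookie)
        msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                                '(uid=test*)', serverctrls=[ctrl],
                                timeout=5)          # operation time limit
        rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
        pctrls = [c for c in rctrls
                  if c.controlType == SimplePagedResultsControl.controlType]
        cookie = pctrls[0].cookie
        if not cookie:
            break                                   # search completed
    time.sleep(10)                                  # longer than the limit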
Failed suites/paged_results/paged_results_test.py::test_search_dns_ip_aci[dns = "localhost.localdomain"] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, aci_subject = 'dns = "localhost.localdomain"'

@pytest.mark.parametrize('aci_subject',
('dns = "localhost.localdomain"',
'ip = "::1" or ip = "127.0.0.1"'))
def test_search_dns_ip_aci(topology_st, test_user, aci_subject):
"""Verify that after performing multiple simple paged searches
to completion on the suffix with DNS or IP based ACI

:id: bbfddc46-a8c8-49ae-8c90-7265d05b22a9
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Back up and remove all previous ACI from suffix
2. Add an anonymous ACI for DNS check
3. Bind as test user
4. Search through added users with a simple paged control
5. Perform steps 4 three times in a row
6. Return ACI to the initial state
7. Go through all the steps once again, but use the IP-based
bind rule instead of the DNS-based one
:assert: No error happens, all users should be found and sorted
"""

users_num = 100
page_size = 5
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:567:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 100, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 100 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00896,dc=example,dc=com): error (Can't contact LDAP server)
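For context, an illustrative anonymous ACI with a DNS bind rule of the kind this test installs on the suffix before running the paged searches (not the test's exact ACI); the IP variant simply swaps the dns keyword for ip. It is added as Directory Manager with assumed credentials.

import ldap

conn = ldap.initialize('ldap://localhost:389')           # assumed URI
conn.simple_bind_s('cn=Directory Manager', 'password')   # assumed credentials

ACI = ('(targetattr="*")(version 3.0; acl "Paged results DNS"; '
       'allow (read,search,compare) '
       '(userdn="ldap:///anyone") and (dns="localhost.localdomain");)')
conn.modify_s('dc=example,dc=com', [(ldap.MOD_ADD, 'aci', ACI)])
# for the second parametrization the bind rule becomes, e.g.:
#   (ip="::1") or (ip="127.0.0.1")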
Failed suites/paged_results/paged_results_test.py::test_search_dns_ip_aci[ip = "::1" or ip = "127.0.0.1"] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, aci_subject = 'ip = "::1" or ip = "127.0.0.1"'

@pytest.mark.parametrize('aci_subject',
('dns = "localhost.localdomain"',
'ip = "::1" or ip = "127.0.0.1"'))
def test_search_dns_ip_aci(topology_st, test_user, aci_subject):
"""Verify that after performing multiple simple paged searches
to completion on the suffix with DNS or IP based ACI

:id: bbfddc46-a8c8-49ae-8c90-7265d05b22a9
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Back up and remove all previous ACI from suffix
2. Add an anonymous ACI for DNS check
3. Bind as test user
4. Search through added users with a simple paged control
5. Perform steps 4 three times in a row
6. Return ACI to the initial state
7. Go through all the steps once again, but use the IP-based
bind rule instead of the DNS-based one
:assert: No error happens, all users should be found and sorted
"""

users_num = 100
page_size = 5
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:567:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 100, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 100 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00676,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_multiple_paging 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None

def test_search_multiple_paging(topology_st, test_user):
"""Verify that after performing multiple simple paged searches
on a single connection without a complition, it wouldn't fail.

:id: 628b29a6-2d47-4116-a88d-00b87405ef7f
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Initiate the search with a simple paged control
3. Acquire the returned cookie only one time
4. Perform steps 2 and 3 three times in a row
:assert: No error happens
"""

users_num = 100
page_size = 30
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:635:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 100, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 100 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00684,dc=example,dc=com): error (Can't contact LDAP server)
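A minimal sketch of the multiple-paging scenario: several paged searches are started on the same connection and only the first page of each is read, leaving the rest uncompleted. Connection details are assumed.

import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://localhost:389')  # assumed URI
conn.simple_bind_s('uid=simplepaged_test,dc=example,dc=com', 'secret')

msgids = []
for _ in range(3):
    ctrl = SimplePagedResultsControl(True, size=30, cookie='')
    msgids.append(conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                                  '(uid=test*)', serverctrls=[ctrl]))
for msgid in msgids:
    conn.result3(msgid)   # read only the first page of each search
# the follow-up requests carrying the returned cookies are never sent,
# yet no error is expected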
Failed suites/paged_results/paged_results_test.py::test_search_invalid_cookie[1000] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, invalid_cookie = 1000

@pytest.mark.parametrize("invalid_cookie", [1000, -1])
def test_search_invalid_cookie(topology_st, test_user, invalid_cookie):
"""Verify that using invalid cookie while performing
search with the simple paged results control throws
a TypeError exception

:id: 107be12d-4fe4-47fe-ae86-f3e340a56f42
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Initiate the search with a simple paged control
3. Put an invalid cookie (-1, 1000) to the control
4. Continue the search
:assert: It will throw a TypeError exception
"""

users_num = 100
page_size = 50
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:693:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 100, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 100 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00890,dc=example,dc=com): error (Can't contact LDAP server)
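A minimal sketch of the invalid-cookie scenario: after the first page, the control's cookie is overwritten with an integer instead of the opaque string returned by the server, and re-sending the control is expected to fail with a TypeError when the control value is encoded. Connection details are assumed.

import ldap
from ldap.controls import SimplePagedResultsControl

conn = ldap.initialize('ldap://localhost:389')  # assumed URI
conn.simple_bind_s('uid=simplepaged_test,dc=example,dc=com', 'secret')

ctrl = SimplePagedResultsControl(True, size=50, cookie='')
msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        '(uid=test*)', serverctrls=[ctrl])
conn.result3(msgid)
ctrl.cookie = 1000          # invalid: must be the server-supplied string
try:
    conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                    '(uid=test*)', serverctrls=[ctrl])
except TypeError as e:
    print('invalid cookie rejected as expected: %s' % e)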
Failed suites/paged_results/paged_results_test.py::test_search_invalid_cookie[-1] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, invalid_cookie = -1

@pytest.mark.parametrize("invalid_cookie", [1000, -1])
def test_search_invalid_cookie(topology_st, test_user, invalid_cookie):
"""Verify that using invalid cookie while performing
search with the simple paged results control throws
a TypeError exception

:id: 107be12d-4fe4-47fe-ae86-f3e340a56f42
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Initiate the search with a simple paged control
3. Put an invalid cookie (-1, 1000) to the control
4. Continue the search
:assert: It will throw a TypeError exception
"""

users_num = 100
page_size = 50
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:693:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 100, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 100 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00018,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_abandon_with_zero_size 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None

def test_search_abandon_with_zero_size(topology_st, test_user):
"""Verify that search with simple paged results control
can be abandon using page_size = 0

:id: d2fd9a10-84e1-4b69-a8a7-36ca1427c171
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Bind as test user
2. Search through added users with a simple paged control
and page_size = 0
:assert: No cookie should be returned at all
"""

users_num = 10
page_size = 0
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:743:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 10, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 10 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00672,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_pagedsizelimit_success 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None

def test_search_pagedsizelimit_success(topology_st, test_user):
"""Verify that search with a simple paged results control
returns all entries it should without errors while
valid value set to nsslapd-pagedsizelimit.

:id: 88193f10-f6f0-42f5-ae9c-ff34b8f9ee8c
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
10 users for the search base
:steps: 1. Set nsslapd-pagedsizelimit: 20
2. Bind as test user
3. Search through added users with a simple paged control
using page_size = 10
:assert: All users should be found
"""

users_num = 10
page_size = 10
attr_name = 'nsslapd-pagedsizelimit'
attr_value = '20'
attr_value_bck = change_conf_attr(topology_st, DN_CONFIG,
> attr_name, attr_value)

suites/paged_results/paged_results_test.py:794:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
suffix = 'cn=config', attr_name = 'nsslapd-pagedsizelimit', attr_value = '20'

def change_conf_attr(topology_st, suffix, attr_name, attr_value):
"""Change configurational attribute in the given suffix.

Returns previous attribute value.
"""

try:
entries = topology_st.standalone.search_s(suffix, ldap.SCOPE_BASE,
'objectclass=top',
[attr_name])
attr_value_bck = entries[0].data.get(attr_name)
log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % (
attr_name, attr_value, attr_value_bck, suffix))
if attr_value is None:
topology_st.standalone.modify_s(suffix, [(ldap.MOD_DELETE,
attr_name,
attr_value)])
else:
topology_st.standalone.modify_s(suffix, [(ldap.MOD_REPLACE,
attr_name,
attr_value)])
except ldap.LDAPError as e:
log.error('Failed to change attr value (%s): error (%s)' % (attr_name,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:184: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
ERROR:tests.suites.paged_results.paged_results_test:Failed to change attr value (nsslapd-pagedsizelimit): error (Can't contact LDAP server)
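For reference, a minimal sketch of what the change_conf_attr helper does: read the current value from the target entry, then MOD_REPLACE the new one (or MOD_DELETE when restoring an absent value) as Directory Manager. The URI and credentials are assumed.

import ldap

conn = ldap.initialize('ldap://localhost:389')           # assumed URI
conn.simple_bind_s('cn=Directory Manager', 'password')   # assumed credentials

dn, attrs = conn.search_s('cn=config', ldap.SCOPE_BASE,
                          'objectclass=top', ['nsslapd-pagedsizelimit'])[0]
previous = attrs.get('nsslapd-pagedsizelimit')           # kept for restore
conn.modify_s('cn=config',
              [(ldap.MOD_REPLACE, 'nsslapd-pagedsizelimit', '20')])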
Failed suites/paged_results/paged_results_test.py::test_search_nspagedsizelimit[5-15-PASS] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr = '5', user_attr = '15', expected_rs = 'PASS'

@pytest.mark.parametrize('conf_attr,user_attr,expected_rs',
(('5', '15', 'PASS'), ('15', '5', ldap.SIZELIMIT_EXCEEDED)))
def test_search_nspagedsizelimit(topology_st, test_user,
conf_attr, user_attr, expected_rs):
"""Verify that nsPagedSizeLimit attribute overrides
nsslapd-pagedsizelimit while performing search with
the simple paged results control.

:id: b08c6ad2-ba28-447a-9f04-5377c3661d0d
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
10 users for the search base
:steps: 1. Set nsslapd-pagedsizelimit: 5
2. Set nsPagedSizeLimit: 15
3. Bind as test user
4. Search through added users with a simple paged control
using page_size = 10
5. Bind as Directory Manager
6. Restore all values
7. Set nsslapd-pagedsizelimit: 15
8. Set nsPagedSizeLimit: 5
9. Bind as test user
10. Search through added users with a simple paged control
using page_size = 10
:assert: After the steps 1-4, it should PASS.
After the steps 7-10, it should throw
SIZELIMIT_EXCEEDED exception
"""

users_num = 10
page_size = 10
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:851:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 10, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 10 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00405,dc=example,dc=com): error (Can't contact LDAP server)
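A minimal sketch of the override being tested here: the global nsslapd-pagedsizelimit is set low, while a higher nsPagedSizeLimit on the bind entry should take precedence for that user's paged searches. DNs and credentials are assumed.

import ldap

admin = ldap.initialize('ldap://localhost:389')           # assumed URI
admin.simple_bind_s('cn=Directory Manager', 'password')   # assumed credentials

TEST_USER_DN = 'uid=simplepaged_test,dc=example,dc=com'   # assumed test user
admin.modify_s('cn=config',
               [(ldap.MOD_REPLACE, 'nsslapd-pagedsizelimit', '5')])
admin.modify_s(TEST_USER_DN,
               [(ldap.MOD_ADD, 'nsPagedSizeLimit', '15')])
# a 10-entry paged search bound as TEST_USER_DN should now succeed;
# swapping the two values should end in SIZELIMIT_EXCEEDED instead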
Failed suites/paged_results/paged_results_test.py::test_search_nspagedsizelimit[15-5-SIZELIMIT_EXCEEDED] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr = '15', user_attr = '5'
expected_rs = <class 'ldap.SIZELIMIT_EXCEEDED'>

@pytest.mark.parametrize('conf_attr,user_attr,expected_rs',
(('5', '15', 'PASS'), ('15', '5', ldap.SIZELIMIT_EXCEEDED)))
def test_search_nspagedsizelimit(topology_st, test_user,
conf_attr, user_attr, expected_rs):
"""Verify that nsPagedSizeLimit attribute overrides
nsslapd-pagedsizelimit while performing search with
the simple paged results control.

:id: b08c6ad2-ba28-447a-9f04-5377c3661d0d
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
10 users for the search base
:steps: 1. Set nsslapd-pagedsizelimit: 5
2. Set nsPagedSizeLimit: 15
3. Bind as test user
4. Search through added users with a simple paged control
using page_size = 10
5. Bind as Directory Manager
6. Restore all values
7. Set nsslapd-pagedsizelimit: 15
8. Set nsPagedSizeLimit: 5
9. Bind as test user
10. Search through added users with a simple paged control
using page_size = 10
:assert: After the steps 1-4, it should PASS.
After the steps 7-10, it should throw
SIZELIMIT_EXCEEDED exception
"""

users_num = 10
page_size = 10
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:851:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 10, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 10 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00121,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_paged_limits[conf_attr_values0-ADMINLIMIT_EXCEEDED] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_values = ('5000', '100', '100')
expected_rs = <class 'ldap.ADMINLIMIT_EXCEEDED'>

@pytest.mark.parametrize('conf_attr_values,expected_rs',
((('5000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED),
(('5000', '120', '122'), 'PASS')))
def test_search_paged_limits(topology_st, test_user, conf_attr_values, expected_rs):
"""Verify that nsslapd-idlistscanlimit and
nsslapd-lookthroughlimit can limit the administrator's
search abilities.

:id: e0f8b916-7276-4bd3-9e73-8696a4468811
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
10 users for the search base
:steps: 1. Set nsslapd-sizelimit and nsslapd-pagedsizelimit to 5000
2. Set nsslapd-idlistscanlimit: 120
3. Set nsslapd-lookthroughlimit: 122
4. Bind as test user
5. Search through added users with a simple paged control
using page_size = 10
6. Bind as Directory Manager
7. Set nsslapd-idlistscanlimit: 100
8. Set nsslapd-lookthroughlimit: 100
9. Bind as test user
10. Search through added users with a simple paged control
using page_size = 10
:assert: After the steps 1-4, it should PASS.
After the steps 7-10, it should throw
ADMINLIMIT_EXCEEDED exception
"""

users_num = 101
page_size = 10
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:919:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 101, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 101 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00612,dc=example,dc=com): error (Can't contact LDAP server)
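A minimal sketch of the limit changes behind these parametrizations: the global size limits are raised so they do not interfere, then nsslapd-lookthroughlimit and nsslapd-idlistscanlimit in the ldbm backend config are lowered below the candidate count, so a paged search as the test user hits ADMINLIMIT_EXCEEDED. DNs and credentials are assumed.

import ldap

admin = ldap.initialize('ldap://localhost:389')           # assumed URI
admin.simple_bind_s('cn=Directory Manager', 'password')   # assumed credentials

DN_LDBM_CONFIG = 'cn=config,cn=ldbm database,cn=plugins,cn=config'
admin.modify_s('cn=config',
               [(ldap.MOD_REPLACE, 'nsslapd-sizelimit', '5000'),
                (ldap.MOD_REPLACE, 'nsslapd-pagedsizelimit', '5000')])
admin.modify_s(DN_LDBM_CONFIG,
               [(ldap.MOD_REPLACE, 'nsslapd-lookthroughlimit', '100'),
                (ldap.MOD_REPLACE, 'nsslapd-idlistscanlimit', '100')])
# with 101 matching entries, a paged search bound as the test user is
# then expected to raise ADMINLIMIT_EXCEEDED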
Failed suites/paged_results/paged_results_test.py::test_search_paged_limits[conf_attr_values1-PASS] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_values = ('5000', '120', '122')
expected_rs = 'PASS'

@pytest.mark.parametrize('conf_attr_values,expected_rs',
((('5000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED),
(('5000', '120', '122'), 'PASS')))
def test_search_paged_limits(topology_st, test_user, conf_attr_values, expected_rs):
"""Verify that nsslapd-idlistscanlimit and
nsslapd-lookthroughlimit can limit the administrator's
search abilities.

:id: e0f8b916-7276-4bd3-9e73-8696a4468811
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
10 users for the search base
:steps: 1. Set nsslapd-sizelimit and nsslapd-pagedsizelimit to 5000
2. Set nsslapd-idlistscanlimit: 120
3. Set nsslapd-lookthroughlimit: 122
4. Bind as test user
5. Search through added users with a simple paged control
using page_size = 10
6. Bind as Directory Manager
7. Set nsslapd-idlistscanlimit: 100
8. Set nsslapd-lookthroughlimit: 100
9. Bind as test user
10. Search through added users with a simple paged control
using page_size = 10
:assert: After the steps 1-4, it should PASS.
After the steps 7-10, it should throw
ADMINLIMIT_EXCEEDED exception
"""

users_num = 101
page_size = 10
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:919:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 101, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 101 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00824,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_paged_user_limits[conf_attr_values0-ADMINLIMIT_EXCEEDED] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_values = ('1000', '100', '100')
expected_rs = <class 'ldap.ADMINLIMIT_EXCEEDED'>

@pytest.mark.parametrize('conf_attr_values,expected_rs',
((('1000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED),
(('1000', '120', '122'), 'PASS')))
def test_search_paged_user_limits(topology_st, test_user, conf_attr_values, expected_rs):
"""Verify that nsPagedIDListScanLimit and nsPagedLookthroughLimit
override nsslapd-idlistscanlimit and nsslapd-lookthroughlimit
while performing a search with the simple paged results control.

:id: 69e393e9-1ab8-4f4e-b4a1-06ca63dc7b1b
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
10 users for the search base
:steps: 1. Set nsslapd-idlistscanlimit: 1000
2. Set nsslapd-lookthroughlimit: 1000
3. Set nsPagedIDListScanLimit: 120
4. Set nsPagedLookthroughLimit: 122
5. Bind as test user
6. Search through added users with a simple paged control
using page_size = 10
7. Bind as Directory Manager
8. Set nsPagedIDListScanLimit: 100
9. Set nsPagedLookthroughLimit: 100
10. Bind as test user
11. Search through added users with a simple paged control
using page_size = 10
:assert: After the steps 1-4, it should PASS.
After the steps 8-11, it should throw
ADMINLIMIT_EXCEEDED exception
"""

users_num = 101
page_size = 10
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:995:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 101, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 101 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00098,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_search_paged_user_limits[conf_attr_values1-PASS] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_values = ('1000', '120', '122')
expected_rs = 'PASS'

@pytest.mark.parametrize('conf_attr_values,expected_rs',
((('1000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED),
(('1000', '120', '122'), 'PASS')))
def test_search_paged_user_limits(topology_st, test_user, conf_attr_values, expected_rs):
"""Verify that nsPagedIDListScanLimit and nsPagedLookthroughLimit
override nsslapd-idlistscanlimit and nsslapd-lookthroughlimit
while performing a search with the simple paged results control.

:id: 69e393e9-1ab8-4f4e-b4a1-06ca63dc7b1b
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
10 users for the search base
:steps: 1. Set nsslapd-idlistscanlimit: 1000
2. Set nsslapd-lookthroughlimit: 1000
3. Set nsPagedIDListScanLimit: 120
4. Set nsPagedLookthroughLimit: 122
5. Bind as test user
6. Search through added users with a simple paged control
using page_size = 10
7. Bind as Directory Manager
8. Set nsPagedIDListScanLimit: 100
9. Set nsPagedLookthroughLimit: 100
10. Bind as test user
11. Search through added users with a simple paged control
using page_size = 10
:assert: After the steps 1-4, it should PASS.
After the steps 8-11, it should throw
ADMINLIMIT_EXCEEDED exception
"""

users_num = 101
page_size = 10
> users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:995:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 101, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 101 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00825,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_ger_basic 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None

def test_ger_basic(topology_st, test_user):
"""Verify that search with a simple paged results control
and get effective rights control returns all entries
it should without errors.

:id: 7b0bdfc7-a2f2-4c1a-bcab-f1eb8b330d45
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
varied number of users for the search base
:steps: 1. Search through added users with a simple paged control
and get effective rights control
:assert: All users should be found, every found entry should have
an 'attributeLevelRights' returned
"""

> users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:1054:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 20, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 20 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00975,dc=example,dc=com): error (Can't contact LDAP server)
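A minimal sketch of combining the paged control with a get effective rights control, assuming the installed python-ldap exposes GetEffectiveRightsControl in ldap.controls.simple; DNs and credentials are illustrative.

import ldap
from ldap.controls import SimplePagedResultsControl
from ldap.controls.simple import GetEffectiveRightsControl

conn = ldap.initialize('ldap://localhost:389')  # assumed URI
conn.simple_bind_s('uid=simplepaged_test,dc=example,dc=com', 'secret')

page_ctrl = SimplePagedResultsControl(True, size=4, cookie='')
ger_ctrl = GetEffectiveRightsControl(
    True, authzId='dn: uid=simplepaged_test,dc=example,dc=com')
msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                        '(uid=test*)', ['attributeLevelRights'],
                        serverctrls=[page_ctrl, ger_ctrl])
rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
# every entry in rdata should carry an attributeLevelRights value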
Failed suites/paged_results/paged_results_test.py::test_maxsimplepaged_per_conn_success[None] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_value = None

@pytest.mark.parametrize('conf_attr_value', (None, '-1', '1000'))
def test_maxsimplepaged_per_conn_success(topology_st, test_user, conf_attr_value):
"""Verify that nsslapd-maxsimplepaged-per-conn acts according design

:id: 192e2f25-04ee-4ff9-9340-d875dcbe8011
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
20 users for the search base
:steps: 1. Set nsslapd-maxsimplepaged-per-conn in cn=config
to the next values: no value, -1, some positive
2. Search through the added users with a simple paged control
using page size = 4
:assert: If no value or value = -1 - all users should be found,
default behaviour;
If the value is positive, the value is the max simple paged
results requests per connection.
"""

> users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:1154:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 20, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 20 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00237,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_maxsimplepaged_per_conn_success[-1] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_value = '-1'

@pytest.mark.parametrize('conf_attr_value', (None, '-1', '1000'))
def test_maxsimplepaged_per_conn_success(topology_st, test_user, conf_attr_value):
"""Verify that nsslapd-maxsimplepaged-per-conn acts according design

:id: 192e2f25-04ee-4ff9-9340-d875dcbe8011
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
20 users for the search base
:steps: 1. Set nsslapd-maxsimplepaged-per-conn in cn=config
to the next values: no value, -1, some positive
2. Search through the added users with a simple paged control
using page size = 4
:assert: If no value or value = -1 - all users should be found,
default behaviour;
If the value is positive, the value is the max simple paged
results requests per connection.
"""

> users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:1154:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 20, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 20 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00557,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_maxsimplepaged_per_conn_success[1000] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_value = '1000'

@pytest.mark.parametrize('conf_attr_value', (None, '-1', '1000'))
def test_maxsimplepaged_per_conn_success(topology_st, test_user, conf_attr_value):
"""Verify that nsslapd-maxsimplepaged-per-conn acts according design

:id: 192e2f25-04ee-4ff9-9340-d875dcbe8011
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
20 users for the search base
:steps: 1. Set nsslapd-maxsimplepaged-per-conn in cn=config
to the next values: no value, -1, some positive
2. Search through the added users with a simple paged control
using page size = 4
:assert: If no value or value = -1 - all users should be found,
default behaviour;
If the value is positive, the value is the max simple paged
results requests per connection.
"""

> users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:1154:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 20, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 20 users
ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00404,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_maxsimplepaged_per_conn_failure[0] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_value = '0'

@pytest.mark.parametrize('conf_attr_value', ('0', '1'))
def test_maxsimplepaged_per_conn_failure(topology_st, test_user, conf_attr_value):
"""Verify that nsslapd-maxsimplepaged-per-conn acts according design

:id: eb609e63-2829-4331-8439-a35f99694efa
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
20 users for the search base
:steps: 1. Set nsslapd-maxsimplepaged-per-conn = 0 in cn=config
2. Search through the added users with a simple paged control
using page size = 4
3. Set nsslapd-maxsimplepaged-per-conn = 1 in cn=config
4. Search through the added users with a simple paged control
using page size = 4 two times, but don't close the connections
:assert: During the searches UNWILLING_TO_PERFORM should be thrown
"""

> users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:1200:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 20, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 20 users ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00650,dc=example,dc=com): error (Can't contact LDAP server)
Failed suites/paged_results/paged_results_test.py::test_maxsimplepaged_per_conn_failure[1] 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
test_user = None, conf_attr_value = '1'

@pytest.mark.parametrize('conf_attr_value', ('0', '1'))
def test_maxsimplepaged_per_conn_failure(topology_st, test_user, conf_attr_value):
"""Verify that nsslapd-maxsimplepaged-per-conn acts according design

:id: eb609e63-2829-4331-8439-a35f99694efa
:feature: Simple paged results
:setup: Standalone instance, test user for binding,
20 users for the search base
:steps: 1. Set nsslapd-maxsimplepaged-per-conn = 0 in cn=config
2. Search through the added users with a simple paged control
using page size = 4
3. Set nsslapd-maxsimplepaged-per-conn = 1 in cn=config
4. Search through the added users with a simple paged control
using page size = 4 two times, but don't close the connections
:assert: During the searches UNWILLING_TO_PERFORM should be thrown
"""

> users_list = add_users(topology_st, 20, DEFAULT_SUFFIX)

suites/paged_results/paged_results_test.py:1200:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

topology_st = <lib389.topologies.TopologyMain object at 0x614f850>
users_num = 20, suffix = 'dc=example,dc=com'

def add_users(topology_st, users_num, suffix):
"""Add users to the default suffix

Return the list of added user DNs.
"""

users_list = []
log.info('Adding %d users' % users_num)
for num in sample(range(1000), users_num):
num_ran = int(round(num))
USER_NAME = 'test%05d' % num_ran
USER_DN = 'uid=%s,%s' % (USER_NAME, suffix)
users_list.append(USER_DN)
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
'cn': USER_NAME,
'sn': USER_NAME,
'userpassword': 'pass%s' % num_ran,
'mail': '%s@redhat.com' % USER_NAME,
'uid': USER_NAME})))
except ldap.LDAPError as e:
log.error('Failed to add user (%s): error (%s)' % (USER_DN,
e.message['desc']))
> raise e
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

suites/paged_results/paged_results_test.py:143: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.paged_results.paged_results_test:Adding 20 users ERROR:tests.suites.paged_results.paged_results_test:Failed to add user (uid=test00248,dc=example,dc=com): error (Can't contact LDAP server)
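The failure mode described in the docstring above could be provoked roughly as follows once the server is reachable again. This is a hedged sketch, not the suite's code: conn is assumed to be bound as Directory Manager, and the suffix, filter and page size are taken from the docstring, not from the test body.

import ldap
from ldap.controls import SimplePagedResultsControl

# Limit each connection to a single simple paged results request
conn.modify_s('cn=config',
              [(ldap.MOD_REPLACE, 'nsslapd-maxsimplepaged-per-conn', '1')])

# The first paged search opens a paged context on this connection
first = SimplePagedResultsControl(True, size=4, cookie='')
conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE, '(uid=test*)',
                serverctrls=[first])

# A second concurrent paged request on the same connection should be refused
second = SimplePagedResultsControl(True, size=4, cookie='')
try:
    msgid = conn.search_ext('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                            '(uid=test*)', serverctrls=[second])
    conn.result3(msgid)
except ldap.UNWILLING_TO_PERFORM:
    pass  # expected once the per-connection limit is exceeded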
Failed suites/password/pwdAdmin_test.py::test_pwdAdmin_init 0.03
topology_st = <lib389.topologies.TopologyMain object at 0x65bd850>

def test_pwdAdmin_init(topology_st):
'''
Create our future Password Admin entry, set the password policy, and test
that it's working
'''

log.info('test_pwdAdmin_init: Creating Password Administator entries...')

# Add Password Admin 1
try:
topology_st.standalone.add_s(Entry((ADMIN_DN, {'objectclass': "top extensibleObject".split(),
'cn': ADMIN_NAME,
'userpassword': ADMIN_PWD})))
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add test user' + ADMIN_DN + ': error ' + e.message['desc'])
assert False

# Add Password Admin 2
try:
topology_st.standalone.add_s(Entry((ADMIN2_DN, {'objectclass': "top extensibleObject".split(),
'cn': ADMIN2_NAME,
'userpassword': ADMIN_PWD})))
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add test user ' + ADMIN2_DN + ': error ' + e.message['desc'])
assert False

# Add Password Admin Group
try:
topology_st.standalone.add_s(Entry((ADMIN_GROUP_DN, {'objectclass': "top groupOfUniqueNames".split(),
'cn': 'password admin group',
'uniquemember': [ADMIN_DN, ADMIN2_DN]})))
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add group' + ADMIN_GROUP_DN + ': error ' + e.message['desc'])
assert False

# Configure password policy
log.info('test_pwdAdmin_init: Configuring password policy...')
try:
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
(ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
(ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
(ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
(ldap.MOD_REPLACE, 'passwordExp', 'on'),
(ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
(ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed configure password policy: ' +
e.message['desc'])
assert False

#
# Add an aci to allow everyone all access (just makes things easier)
#
log.info('Add aci to allow password admin to add/update entries...')

ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
ACI_TARGETATTR = "(targetattr = *)"
ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
try:
topology_st.standalone.modify_s(SUFFIX, mod)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to add aci for password admin: ' +
e.message['desc'])
assert False

#
# Bind as the future Password Admin
#
log.info('test_pwdAdmin_init: Bind as the Password Administator (before activating)...')
try:
topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin_init: Failed to bind as the Password Admin: ' +
e.message['desc'])
> assert False
E assert False

suites/password/pwdAdmin_test.py:107: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdAdmin_test:test_pwdAdmin_init: Creating Password Administator entries...
INFO:tests.suites.password.pwdAdmin_test:test_pwdAdmin_init: Configuring password policy...
INFO:tests.suites.password.pwdAdmin_test:Add aci to allow password admin to add/update entries...
INFO:tests.suites.password.pwdAdmin_test:test_pwdAdmin_init: Bind as the Password Administator (before activating)...
CRITICAL:tests.suites.password.pwdAdmin_test:test_pwdAdmin_init: Failed to bind as the Password Admin: Can't contact LDAP server
Failed suites/password/pwdAdmin_test.py::test_pwdAdmin 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x65bd850>

def test_pwdAdmin(topology_st):
'''
Test that password administrators/root DN can
bypass password syntax/policy.

We need to test how passwords are modified in
existing entries, and when adding new entries.

Create the Password Admin entry, but do not set
it as an admin yet. Use the entry to verify invalid
passwords are caught. Then activate the password
admin and make sure it can bypass password policy.
'''

#
# Now activate a password administrator, bind as root dn to do the config
# update, then rebind as the password admin
#
log.info('test_pwdAdmin: Activate the Password Administator...')

#
# Setup our test entry, and test password policy is working
#
entry = Entry(ENTRY_DN)
entry.setValues('objectclass', 'top', 'person')
entry.setValues('sn', ENTRY_NAME)
entry.setValues('cn', ENTRY_NAME)

# Bind as Root DN
try:
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' +
e.message['desc'])
> assert False
E assert False

suites/password/pwdAdmin_test.py:174: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdAdmin_test:test_pwdAdmin: Activate the Password Administator... CRITICAL:tests.suites.password.pwdAdmin_test:test_pwdAdmin: Root DN failed to authenticate: Can't contact LDAP server
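Both failures above stop before the interesting part of the test. As a rough sketch of what "activating" a password administrator involves: 389-ds is assumed here to use the passwordAdminDN attribute in cn=config, and the group DN, admin DN, passwords and port below are illustrative, not taken from the suite.

import ldap

# Point cn=config at the admin group, bound as the Root DN
conn.simple_bind_s('cn=Directory Manager', 'password')
conn.modify_s('cn=config',
              [(ldap.MOD_REPLACE, 'passwordAdminDN',
                'cn=password admin group,dc=example,dc=com')])

# A member of that group can now set a password that violates the syntax policy
admin = ldap.initialize('ldap://localhost:389')
admin.simple_bind_s('cn=admin1,dc=example,dc=com', 'adminpassword')
admin.modify_s('uid=someuser,dc=example,dc=com',
               [(ldap.MOD_REPLACE, 'userpassword', 'simple')])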
Failed suites/password/pwdPolicy_syntax_test.py::test_pwdPolicy_syntax 0.07
topology_st = <lib389.topologies.TopologyMain object at 0x67dc390>

def test_pwdPolicy_syntax(topology_st):
'''
Password policy test: Ensure that on a password change, the policy syntax
is enforced correctly.
'''

# Create a user
_create_user(topology_st.standalone)

# Set the password policy globally
topology_st.standalone.config.set('passwordCheckSyntax', 'on')
topology_st.standalone.config.set('nsslapd-pwpolicy-local', 'off')
topology_st.standalone.config.set('passwordMinCategories', '1')

#
# Test each syntax category
#

# Min Length
tryPassword(topology_st.standalone, 'passwordMinLength', 10, 2, 'passwd',
> 'password123', 'length too short')

suites/password/pwdPolicy_syntax_test.py:139:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
suites/password/pwdPolicy_syntax_test.py:91: in tryPassword
setPolicy(inst, policy_attr, value)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

inst = <lib389.DirSrv object at 0x67ab3d0>, attr = 'passwordMinLength'
value = '10'

def setPolicy(inst, attr, value):
"""Bind as ROot DN, set polcy, and then bind as user"""
try:
inst.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
log.fatal("Failed to bind as Directory Manager: " + str(e))
assert False

value = str(value)
"""
if value == '0':
# Remove the policy attribute
try:
inst.modify_s("cn=config",
[(ldap.MOD_DELETE, attr, None)])
except ldap.LDAPError as e:
log.fatal("Failed to rmeove password policy %s: %s" %
(attr, str(e)))
assert False
else:
"""
# Set the policy value
inst.config.set(attr, value)

try:
inst.simple_bind_s(USER_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal("Failed to bind: " + str(e))
> assert False
E assert False

suites/password/pwdPolicy_syntax_test.py:62: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
CRITICAL:tests.suites.password.pwdPolicy_syntax_test:Failed to bind: {'desc': "Can't contact LDAP server"}
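For context, the check that tryPassword/setPolicy perform amounts to the following; a minimal sketch with assumed DNs, passwords and port (not the suite's helpers):

import ldap

# Tighten one syntax attribute as Directory Manager
conn.simple_bind_s('cn=Directory Manager', 'password')
conn.modify_s('cn=config', [(ldap.MOD_REPLACE, 'passwordMinLength', '10')])

# A self password change that violates the policy must be rejected
user = ldap.initialize('ldap://localhost:389')
user.simple_bind_s('uid=user,dc=example,dc=com', 'password')
try:
    user.passwd_s('uid=user,dc=example,dc=com', 'password', 'passwd')  # too short
except ldap.CONSTRAINT_VIOLATION:
    pass  # the syntax policy rejected the new value, as the test expects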
Failed suites/password/pwdPolicy_warning_test.py::test_expiry_time 0.02
topology_st = <lib389.topologies.TopologyMain object at 0x67d5f10>
global_policy = None, add_user = None

def test_expiry_time(topology_st, global_policy, add_user):
"""Test whether the password expiry warning
time for a user is returned appropriately

:ID: 7adfd395-9b25-4cc0-9b71-14710dc1a28c
:feature: Password Expiry Warning Time
:setup: Standalone DS instance with,
1. Global password policy configured as below:
passwordExp: on
passwordMaxAge: 172800
passwordWarning: 86400
passwordSendExpiringTime: on
2. User entry for binding
:steps: 1. Bind as the user
2. Request the control for the user
:assert: The password expiry warning time for the user should be
returned
"""

res_ctrls = None
try:
log.info('Get the password expiry warning time')
log.info("Binding with ({:s}) and requesting the password expiry warning time" \
.format(USER_DN))
res_ctrls = get_password_warning(topology_st)

log.info('Check whether the time is returned')
assert res_ctrls

log.info("user's password will expire in {:d} seconds" \
.format(res_ctrls[0].timeBeforeExpiration))
finally:
log.info("Rebinding as DM")
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

suites/password/pwdPolicy_warning_test.py:306:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x67d5b50>
func = <built-in method simple_bind of LDAP object at 0x2b55788>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default values
INFO:tests.suites.password.pwdPolicy_warning_test:Set the new values
INFO:tests.suites.password.pwdPolicy_warning_test:Add the user
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the password expiry warning time
INFO:tests.suites.password.pwdPolicy_warning_test:Binding with (uid=tuser,dc=example,dc=com) and requesting the password expiry warning time
INFO:tests.suites.password.pwdPolicy_warning_test:Bind with the user and request the password expiry warning time
ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to get password expiry warning time, error:Can't contact LDAP server
INFO:tests.suites.password.pwdPolicy_warning_test:Rebinding as DM
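The helper get_password_warning is not shown in this traceback; requesting the expiry warning generally means binding with the password policy request control and reading timeBeforeExpiration from the response control. A sketch, assuming a python-ldap build that ships ldap.controls.ppolicy (the DN, password and URI are assumptions):

import ldap
from ldap.controls.ppolicy import PasswordPolicyControl

conn = ldap.initialize('ldap://localhost:389')
msgid = conn.simple_bind('uid=tuser,dc=example,dc=com', 'password',
                         [PasswordPolicyControl()], None)
rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
res_ctrls = [c for c in rctrls
             if c.controlType == PasswordPolicyControl.controlType]
if res_ctrls:
    print(res_ctrls[0].timeBeforeExpiration)  # seconds until expiry, or None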
Failed suites/password/pwd_algo_test.py::test_pwd_algo_test 0.03
topology_st = <lib389.topologies.TopologyMain object at 0x68328d0>

def test_pwd_algo_test(topology_st):
"""Assert that all of our password algorithms correctly PASS and FAIL varying
password conditions.
"""

for algo in (
'CLEAR', 'CRYPT', 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', 'SSHA256', 'SSHA384',
'SSHA512'):
> _test_algo(topology_st.standalone, algo)

suites/password/pwd_algo_test.py:75:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
suites/password/pwd_algo_test.py:48: in _test_algo
assert (_test_bind(inst, 'Secret123'))
suites/password/pwd_algo_test.py:24: in _test_bind
userconn.simple_bind_s(USER_DN, password)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ldap.ldapobject.SimpleLDAPObject instance at 0x5593d88>
func = <built-in method result4 of LDAP object at 0x31d2198>
args = (1, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
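_test_algo and _test_bind are not shown above; the round they perform for each scheme is roughly the following sketch (DNs, passwords and URI are assumptions, not the suite's constants):

import ldap

# Switch the storage scheme and reset the test password as Directory Manager
conn.simple_bind_s('cn=Directory Manager', 'password')
conn.modify_s('cn=config', [(ldap.MOD_REPLACE, 'passwordStorageScheme', 'SSHA512')])
conn.modify_s('uid=user,dc=example,dc=com',
              [(ldap.MOD_REPLACE, 'userpassword', 'Secret123')])

# The correct password must bind, a wrong one must not
good = ldap.initialize('ldap://localhost:389')
good.simple_bind_s('uid=user,dc=example,dc=com', 'Secret123')
bad = ldap.initialize('ldap://localhost:389')
try:
    bad.simple_bind_s('uid=user,dc=example,dc=com', 'Wrong123')
except ldap.INVALID_CREDENTIALS:
    pass  # the hashed value still verifies only the right password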
Failed suites/password/pwp_history_test.py::test_pwp_history_test 1.04
topology_st = <lib389.topologies.TopologyMain object at 0x682ac10>

def test_pwp_history_test(topology_st):
"""
Test password policy history feature:
- Test password history is enforced
- Test password history works after an Admin resets the password
- Test that the correct number of passwords are stored in history
"""

USER_DN = 'uid=testuser,' + DEFAULT_SUFFIX

#
# Configure password history policy and add a test user
#
try:
topology_st.standalone.modify_s("cn=config",
[(ldap.MOD_REPLACE,
'passwordHistory', 'on'),
(ldap.MOD_REPLACE,
'passwordInHistory', '3'),
(ldap.MOD_REPLACE,
'passwordChange', 'on'),
(ldap.MOD_REPLACE,
'passwordStorageScheme', 'CLEAR')])
log.info('Configured password policy.')
except ldap.LDAPError as e:
log.fatal('Failed to configure password policy: ' + str(e))
assert False
time.sleep(1)

try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': ['top', 'extensibleObject'],
'sn': 'user',
'cn': 'test user',
'uid': 'testuser',
'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('Failed to add test user' + USER_DN + ': error ' + str(e))
assert False

#
# Test that password history is enforced.
#
try:
topology_st.standalone.simple_bind_s(USER_DN, 'password')
except ldap.LDAPError as e:
log.fatal('Failed to bind as user: ' + str(e))
> assert False
E assert False

suites/password/pwp_history_test.py:65: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwp_history_test:Configured password policy. CRITICAL:tests.suites.password.pwp_history_test:Failed to bind as user: {'desc': "Can't contact LDAP server"}
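The enforcement this test never reached can be summarised by the following sketch, bound as the test user created above (the URI and the replacement password are assumptions):

import ldap

user = ldap.initialize('ldap://localhost:389')
user.simple_bind_s('uid=testuser,dc=example,dc=com', 'password')
user.modify_s('uid=testuser,dc=example,dc=com',
              [(ldap.MOD_REPLACE, 'userpassword', 'password1')])
try:
    # Re-using a password still held in the history must fail
    user.modify_s('uid=testuser,dc=example,dc=com',
                  [(ldap.MOD_REPLACE, 'userpassword', 'password')])
except ldap.CONSTRAINT_VIOLATION:
    pass  # rejected while 'password' is within the last 3 stored values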
Failed suites/plugins/rootdn_plugin_test.py::test_rootdn_access_specific_time 0.02
topology_st = <lib389.topologies.TopologyMain object at 0x68853d0>

def test_rootdn_access_specific_time(topology_st):
'''
Test binding inside and outside of a specific time
'''

log.info('Running test_rootdn_access_specific_time...')

# Get the current time and choose an open/close window that excludes it
current_hour = time.strftime("%H")
if int(current_hour) > 12:
open_time = '0200'
close_time = '0400'
else:
open_time = '1600'
close_time = '1800'

try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time),
(ldap.MOD_ADD, 'rootdn-close-time', close_time)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: Failed to set (blocking) open/close times: error ' +
e.message['desc'])
assert False

#
# Bind as Root DN - should fail
#
try:
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
succeeded = True
except ldap.LDAPError as e:
succeeded = False

if succeeded:
log.fatal('test_rootdn_access_specific_time: Root DN was incorrectly able to bind')
assert False

#
# Bind as user1 and set the config to allow the entire day
#
try:
topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD)
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_specific_time: test_rootdn: failed to bind as user1')
> assert False
E assert False

suites/plugins/rootdn_plugin_test.py:127: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Running test_rootdn_access_specific_time... CRITICAL:tests.suites.plugins.rootdn_plugin_test:test_rootdn_access_specific_time: test_rootdn: failed to bind as user1
Failed suites/plugins/rootdn_plugin_test.py::test_rootdn_access_day_of_week 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x68853d0>

def test_rootdn_access_day_of_week(topology_st):
'''
Test the days of week feature
'''

log.info('Running test_rootdn_access_day_of_week...')

days = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat')
day = int(time.strftime("%w", time.gmtime()))

if day == 6:
# Handle the roll over from Saturday into Sunday
deny_days = days[1] + ', ' + days[2]
allow_days = days[6] + ',' + days[0]
elif day > 3:
deny_days = days[0] + ', ' + days[1]
allow_days = days[day] + ',' + days[day - 1]
else:
deny_days = days[4] + ',' + days[5]
allow_days = days[day] + ',' + days[day + 1]

log.info('Today: ' + days[day])
log.info('Allowed days: ' + allow_days)
log.info('Deny days: ' + deny_days)

#
# Set the deny days
#
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
deny_days)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
e.message['desc'])
> assert False
E assert False

suites/plugins/rootdn_plugin_test.py:199: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Running test_rootdn_access_day_of_week...
INFO:tests.suites.plugins.rootdn_plugin_test:Today: Wed
INFO:tests.suites.plugins.rootdn_plugin_test:Allowed days: Wed,Thu
INFO:tests.suites.plugins.rootdn_plugin_test:Deny days: Thu,Fri
CRITICAL:tests.suites.plugins.rootdn_plugin_test:test_rootdn_access_day_of_week: Failed to set the deny days: error Can't contact LDAP server
Failed suites/plugins/rootdn_plugin_test.py::test_rootdn_access_denied_ip 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x68853d0>

def test_rootdn_access_denied_ip(topology_st):
'''
Test denied IP feature - we can just test denying 127.0.0.1
'''

log.info('Running test_rootdn_access_denied_ip...')

try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
'rootdn-deny-ip',
'127.0.0.1'),
(ldap.MOD_ADD,
'rootdn-deny-ip',
'::1')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
e.message['desc'])
> assert False
E assert False

suites/plugins/rootdn_plugin_test.py:275: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Running test_rootdn_access_denied_ip... CRITICAL:tests.suites.plugins.rootdn_plugin_test:test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error Can't contact LDAP server
Failed suites/plugins/rootdn_plugin_test.py::test_rootdn_access_denied_host 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x68853d0>

def test_rootdn_access_denied_host(topology_st):
'''
Test denied Host feature - we can just test denying localhost
'''

log.info('Running test_rootdn_access_denied_host...')
hostname = socket.gethostname()
localhost = DirSrvTools.getLocalhost()
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
'rootdn-deny-host',
hostname)])
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
'rootdn-deny-host',
localhost)])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' +
e.message['desc'])
> assert False
E assert False

suites/plugins/rootdn_plugin_test.py:351: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Running test_rootdn_access_denied_host... CRITICAL:tests.suites.plugins.rootdn_plugin_test:test_rootdn_access_denied_host: Failed to set deny host: error Can't contact LDAP server
Failed suites/plugins/rootdn_plugin_test.py::test_rootdn_access_allowed_ip 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x68853d0>

def test_rootdn_access_allowed_ip(topology_st):
'''
Test allowed ip feature
'''

log.info('Running test_rootdn_access_allowed_ip...')

#
# Set the allowed IP to an unreachable address - blocks the Root DN
#
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '255.255.255.255')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' +
e.message['desc'])
> assert False
E assert False

suites/plugins/rootdn_plugin_test.py:424: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Running test_rootdn_access_allowed_ip... CRITICAL:tests.suites.plugins.rootdn_plugin_test:test_rootdn_access_allowed_ip: Failed to set allowed host: error Can't contact LDAP server
Failed suites/plugins/rootdn_plugin_test.py::test_rootdn_access_allowed_host 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x68853d0>

def test_rootdn_access_allowed_host(topology_st):
'''
Test allowed host feature
'''

log.info('Running test_rootdn_access_allowed_host...')

#
# Set allowed host to an unknown host - blocks the Root DN
#
try:
topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'i.dont.exist.com')])
except ldap.LDAPError as e:
log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' +
e.message['desc'])
> assert False
E assert False

suites/plugins/rootdn_plugin_test.py:498: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Running test_rootdn_access_allowed_host... CRITICAL:tests.suites.plugins.rootdn_plugin_test:test_rootdn_access_allowed_host: Failed to set allowed host: error Can't contact LDAP server
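Each of the rootdn tests above adds another restriction to the plugin entry. Once the server is reachable again, the configuration can be reset by deleting every restriction attribute; a sketch in which the plugin DN and the bound connection are assumptions (MOD_DELETE with None removes all values of an attribute):

import ldap

PLUGIN_DN = 'cn=RootDN Access Control,cn=plugins,cn=config'  # assumed plugin entry
for attr in ('rootdn-open-time', 'rootdn-close-time', 'rootdn-days-allowed',
             'rootdn-deny-ip', 'rootdn-deny-host',
             'rootdn-allow-ip', 'rootdn-allow-host'):
    try:
        conn.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, attr, None)])
    except ldap.NO_SUCH_ATTRIBUTE:
        pass  # nothing to clean up for this attribute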
Failed suites/schema/test_schema.py::test_schema_comparewithfiles 0.13
topology_st = <lib389.topologies.TopologyMain object at 0x70d3d50>

def test_schema_comparewithfiles(topology_st):
'''Compare the schema from ldap cn=schema with the schema files'''

log.info('Running test_schema_comparewithfiles...')

retval = True
schemainst = topology_st.standalone
ldschema = schemainst.schema.get_subschema()
assert ldschema
> for fn in schemainst.schema.list_files():

suites/schema/test_schema.py:129:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/schema.py:42: in list_files
file_list += glob.glob(self.conn.ds_paths.system_schema_dir + "/*.ldif")
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/paths.py:153: in __getattr__
return self._config.get(SECTION, name).format(instance_name=self._serverid)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ConfigParser.SafeConfigParser instance at 0x7244878>, section = 'slapd'
option = 'system_schema_dir', raw = False, vars = None

def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.

If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.

All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.

The section DEFAULT is special.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
> raise NoOptionError(option, section)
E NoOptionError: No option 'system_schema_dir' in section: 'slapd'

/usr/lib64/python2.7/ConfigParser.py:618: NoOptionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.schema.test_schema:Running test_schema_comparewithfiles...
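The comparison never got past resolving system_schema_dir. The live half of it, reading cn=schema into a python-ldap SubSchema, looks roughly like the sketch below (URI and credentials are assumptions):

import ldap
import ldap.schema

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')
res = conn.search_s('cn=schema', ldap.SCOPE_BASE, '(objectClass=*)',
                    ['attributeTypes', 'objectClasses'])
subschema = ldap.schema.SubSchema(res[0][1])
# e.g. list every objectClass OID known to the running server
oc_oids = subschema.listall(ldap.schema.ObjectClass)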
Failed tickets/ticket1347760_test.py::test_ticket1347760 0.05
topology_st = <lib389.topologies.TopologyMain object at 0x70f3d90>

def test_ticket1347760(topology_st):
"""
Prevent revealing entry information to users who have no access rights.
"""
log.info('Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc.')

log.info('Disabling accesslog logbuffering')
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', 'off')])

log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD))
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

log.info('Adding ou=%s a bind user belongs to.' % BOU)
topology_st.standalone.add_s(Entry((BINDOU, {
'objectclass': 'top organizationalunit'.split(),
'ou': BOU})))

log.info('Adding a bind user.')
topology_st.standalone.add_s(Entry((BINDDN,
{'objectclass': "top person organizationalPerson inetOrgPerson".split(),
'cn': 'bind user',
'sn': 'user',
'userPassword': BINDPW})))

log.info('Adding a test user.')
topology_st.standalone.add_s(Entry((TESTDN,
{'objectclass': "top person organizationalPerson inetOrgPerson".split(),
'cn': 'test user',
'sn': 'user',
'userPassword': TESTPW})))

log.info('Deleting aci in %s.' % DEFAULT_SUFFIX)
topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)])

log.info('While binding as DM, acquire an access log path and instance dir')
ds_paths = Paths(serverid=topology_st.standalone.serverid,
instance=topology_st.standalone)
file_path = ds_paths.access_log
inst_dir = ds_paths.inst_dir

log.info('Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.')
log.info('Bind as {%s,%s} who has no access rights.' % (BINDDN, BINDPW))
try:
topology_st.standalone.simple_bind_s(BINDDN, BINDPW)
except ldap.LDAPError as e:
log.info('Desc ' + e.message['desc'])
> assert False
E assert False

tickets/ticket1347760_test.py:191: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket1347760_test:Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc.
INFO:tests.tickets.ticket1347760_test:Disabling accesslog logbuffering
INFO:tests.tickets.ticket1347760_test:Bind as {cn=Directory Manager,password}
INFO:tests.tickets.ticket1347760_test:Adding ou=BOU a bind user belongs to.
INFO:tests.tickets.ticket1347760_test:Adding a bind user.
INFO:tests.tickets.ticket1347760_test:Adding a test user.
INFO:tests.tickets.ticket1347760_test:Deleting aci in dc=example,dc=com.
INFO:tests.tickets.ticket1347760_test:While binding as DM, acquire an access log path and instance dir
INFO:tests.tickets.ticket1347760_test:Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.
INFO:tests.tickets.ticket1347760_test:Bind as {uid=buser123,ou=BOU,dc=example,dc=com,buser123} who has no access rights.
INFO:tests.tickets.ticket1347760_test:Desc Can't contact LDAP server
Failed tickets/ticket397_test.py::test_397 0.77
topology_st = <lib389.topologies.TopologyMain object at 0x7876750>

def test_397(topology_st):
"""
Assert that all of our password algorithms correctly PASS and FAIL varying
password conditions.

"""
if DEBUGGING:
# Add debugging steps(if any)...
log.info("ATTACH NOW")
time.sleep(30)

# Merge this to the password suite in the future

for algo in ('PBKDF2_SHA256',):
for i in range(0, 10):
> _test_algo(topology_st.standalone, algo)

tickets/ticket397_test.py:92:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tickets/ticket397_test.py:56: in _test_algo
assert (_test_bind(inst, 'Secret123'))
tickets/ticket397_test.py:24: in _test_bind
userconn.simple_bind_s(USER_DN, password)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ldap.ldapobject.SimpleLDAPObject instance at 0x7886908>
func = <built-in method result4 of LDAP object at 0x31d2710>
args = (1, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Failed tickets/ticket47553_test.py::test_ticket47553 0.02
topology_st = <lib389.topologies.TopologyMain object at 0x72ddcd0>
env_setup = None

def test_ticket47553(topology_st, env_setup):
"""Tests, that MODRDN operation is allowed,
if user has ACI right '(all)' under superior entries,
but doesn't have '(modrdn)'
"""

log.info("Bind as %s" % USER)
try:
topology_st.standalone.simple_bind_s(USER, USER_PWD)
except ldap.LDAPError as e:
log.error('Bind failed for %s, error %s' % (USER, e.message['desc']))
> assert False
E assert False

tickets/ticket47553_test.py:86: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:tests.tickets.ticket47553_test:Add a container: ou=test_ou_1,dc=example,dc=com
INFO:tests.tickets.ticket47553_test:Add a container: ou=test_ou_2,dc=example,dc=com
INFO:tests.tickets.ticket47553_test:Add a user: cn=test_user,ou=test_ou_1,dc=example,dc=com
INFO:tests.tickets.ticket47553_test:Add an ACI 'allow (all)' by cn=test_user,ou=test_ou_1,dc=example,dc=com to the ou=test_ou_1,dc=example,dc=com
INFO:tests.tickets.ticket47553_test:Add an ACI 'allow (all)' by cn=test_user,ou=test_ou_1,dc=example,dc=com to the ou=test_ou_2,dc=example,dc=com
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47553_test:Bind as cn=test_user,ou=test_ou_1,dc=example,dc=com ERROR:tests.tickets.ticket47553_test:Bind failed for cn=test_user,ou=test_ou_1,dc=example,dc=com, error Can't contact LDAP server
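Had the bind succeeded, the MODRDN under test is essentially the following sketch: moving an entry from ou=test_ou_1 to ou=test_ou_2 while bound as cn=test_user (the user password, URI and the choice of moved entry are assumptions):

import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=test_user,ou=test_ou_1,dc=example,dc=com', 'password')
# Allowed because '(all)' is granted on both containers, even without '(modrdn)'
conn.rename_s('cn=test_user,ou=test_ou_1,dc=example,dc=com',
              'cn=test_user',
              newsuperior='ou=test_ou_2,dc=example,dc=com')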
Failed tickets/ticket47653_test.py::test_ticket47653_add 0.02
topology_st = <lib389.topologies.TopologyMain object at 0x7f34f10>

def test_ticket47653_add(topology_st):
'''
It checks that, bound as bind_entry,
- we can not ADD an entry without the proper SELFDN aci.
- with the proper ACI we can not ADD with 'member' attribute
- with the proper ACI and 'member' the ADD succeeds
'''
topology_st.standalone.log.info("\n\n######################### ADD ######################\n")

# bind as bind_entry
topology_st.standalone.log.info("Bind as %s" % BIND_DN)
> topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)

tickets/ticket47653_test.py:101:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f34ad0>
func = <built-in method result4 of LDAP object at 0x34a5148>
args = (16, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ######################### ADD ###################### INFO:lib389:Bind as cn=bind_entry, dc=example,dc=com
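The first negative case the docstring lists (no SELFDN aci yet, so the ADD must be refused) can be illustrated as follows; the entry name and attributes are assumptions, and conn is assumed to be bound as cn=bind_entry:

import ldap

try:
    conn.add_s('cn=other_entry,dc=example,dc=com',
               [('objectclass', ['top', 'extensibleObject']),
                ('cn', 'other_entry')])
except ldap.INSUFFICIENT_ACCESS:
    pass  # expected: no SELFDN aci grants this bind DN the add right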
Failed tickets/ticket47653_test.py::test_ticket47653_search 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x7f34f10>

def test_ticket47653_search(topology_st):
'''
It checks that, bound as bind_entry,
- we can not search an entry without the proper SELFDN aci.
- adding the ACI, we can search the entry
'''
topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n")
# bind as bind_entry
topology_st.standalone.log.info("Bind as %s" % BIND_DN)
> topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)

tickets/ticket47653_test.py:188:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f34ad0>
func = <built-in method simple_bind of LDAP object at 0x34a5148>
args = ('cn=bind_entry, dc=example,dc=com', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ######################### SEARCH ###################### INFO:lib389:Bind as cn=bind_entry, dc=example,dc=com
Failed tickets/ticket47653_test.py::test_ticket47653_modify 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x7f34f10>

def test_ticket47653_modify(topology_st):
'''
It checks that, bound as bind_entry,
- we can not modify an entry without the proper SELFDN aci.
- adding the ACI, we can modify the entry
'''
# bind as bind_entry
topology_st.standalone.log.info("Bind as %s" % BIND_DN)
> topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)

tickets/ticket47653_test.py:226:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f34ad0>
func = <built-in method simple_bind of LDAP object at 0x34a5148>
args = ('cn=bind_entry, dc=example,dc=com', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:lib389:Bind as cn=bind_entry, dc=example,dc=com
Failed tickets/ticket47653_test.py::test_ticket47653_delete 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x7f34f10>

def test_ticket47653_delete(topology_st):
'''
It checks that, bound as bind_entry,
- we can not delete an entry without the proper SELFDN aci.
- adding the ACI, we can delete the entry
'''
topology_st.standalone.log.info("\n\n######################### DELETE ######################\n")

# bind as bind_entry
topology_st.standalone.log.info("Bind as %s" % BIND_DN)
> topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)

tickets/ticket47653_test.py:276:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x7f34ad0>
func = <built-in method simple_bind of LDAP object at 0x34a5148>
args = ('cn=bind_entry, dc=example,dc=com', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ######################### DELETE ###################### INFO:lib389:Bind as cn=bind_entry, dc=example,dc=com
Failed tickets/ticket47714_test.py::test_ticket47714_run_0 2.13
topology_st = <lib389.topologies.TopologyMain object at 0x8694ed0>

def test_ticket47714_run_0(topology_st):
"""
Check this change has no impact on the existing functionality.
1. Set account policy config without the new attr alwaysRecordLoginAttr
2. Bind as a test user
3. Bind as the test user again and check that the lastLoginTime is updated
4. Wait longer than the accountInactivityLimit time and bind as the test user,
which should fail with CONSTRAINT_VIOLATION.
"""
_header(topology_st, 'Account Policy - No new attr alwaysRecordLoginAttr in config')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

# Modify Account Policy config entry
topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
(ldap.MOD_REPLACE, 'limitattrname',
'accountInactivityLimit')])

# Enable the plugins
topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)

> topology_st.standalone.restart(timeout=120)

tickets/ticket47714_test.py:92:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1258: in restart
self.start(timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1141: in start
"dirsrv@%s" % self.serverid])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

popenargs = (['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1'],)
kwargs = {}, retcode = 1
cmd = ['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']

def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.

The arguments are the same as for the Popen constructor. Example:

check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
> raise CalledProcessError(retcode, cmd)
E CalledProcessError: Command '['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']' returned non-zero exit status 1

/usr/lib64/python2.7/subprocess.py:542: CalledProcessError
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Account Policy - No new attr alwaysRecordLoginAttr in config
INFO:lib389:#######
INFO:lib389:###############################################
Job for dirsrv@standalone_1.service failed because a fatal signal was delivered to the control process. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
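Step 4 of the docstring, which the restart failure prevented, corresponds roughly to this sketch; the limit value, user DN, password and URI are assumptions:

import time
import ldap

INACTIVITY_LIMIT = 15  # seconds; assumed short value for a test-style limit
time.sleep(INACTIVITY_LIMIT + 5)
user = ldap.initialize('ldap://localhost:389')
try:
    user.simple_bind_s('uid=ticket47714user,dc=example,dc=com', 'password')
except ldap.CONSTRAINT_VIOLATION:
    pass  # the Account Policy plugin treats the account as inactive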
Failed tickets/ticket47714_test.py::test_ticket47714_run_1 0.00
topology_st = <lib389.topologies.TopologyMain object at 0x8694ed0>

def test_ticket47714_run_1(topology_st):
"""
Verify a new config attr alwaysRecordLoginAttr
1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime
Note: a bogus attribute is set as stateattrname;
the altstateattrname value is then used to check whether the account is idle or not.
2. Bind as a test user
3. Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated
"""
_header(topology_st, 'Account Policy - With new attr alwaysRecordLoginAttr in config')

> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

tickets/ticket47714_test.py:149:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x86966d0>
func = <built-in method result4 of LDAP object at 0x2943710>
args = (9, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Account Policy - With new attr alwaysRecordLoginAttr in config
INFO:lib389:#######
INFO:lib389:###############################################
Failed tickets/ticket47838_test.py::test_47838_run_4 2.27
topology_st = <lib389.topologies.TopologyMain object at 0x7f0d6d0>

def test_47838_run_4(topology_st):
"""
Check no nsSSL3Ciphers
Default ciphers are enabled.
default allowWeakCipher
"""
_header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (default setting) with default allowWeakCipher')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.47838_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
topology_st.standalone.start(timeout=120)

enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())

log.info("Enabled ciphers: %d" % ecount)
log.info("Disabled ciphers: %d" % dcount)
global plus_all_ecount
global plus_all_dcount
if nss_version >= NSS323:
> assert ecount == 29
E assert 28 == 29

tickets/ticket47838_test.py:340: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Test Case 5 - Check no nsSSL3Ciphers (default setting) with default allowWeakCipher
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Restarting the server ######################
INFO:lib389.utils:Enabled ciphers: 28
INFO:lib389.utils:Disabled ciphers: 43
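The counts asserted here come from shelling out to egrep/wc over the error log; an equivalent pure-Python tally (a sketch reusing the errlog attribute already referenced in the test above) would be:

def count_ciphers(errlog_path):
    """Count enabled/disabled cipher lines written by the server at startup."""
    enabled = disabled = 0
    with open(errlog_path) as f:
        for line in f:
            if 'SSL info:' not in line:
                continue
            if ': enabled' in line:
                enabled += 1
            elif ': disabled' in line:
                disabled += 1
    return enabled, disabled

# e.g. ecount, dcount = count_ciphers(topology_st.standalone.errlog)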
Failed tickets/ticket47838_test.py::test_47838_run_5 2.25
topology_st = <lib389.topologies.TopologyMain object at 0x7f0d6d0>

def test_47838_run_5(topology_st):
"""
Check nsSSL3Ciphers: default
Default ciphers are enabled.
default allowWeakCipher
"""
_header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.47838_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
topology_st.standalone.start(timeout=120)

enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())

log.info("Enabled ciphers: %d" % ecount)
log.info("Disabled ciphers: %d" % dcount)
global plus_all_ecount
global plus_all_dcount
if nss_version >= NSS323:
> assert ecount == 29
E assert 28 == 29

tickets/ticket47838_test.py:381: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Restarting the server ######################
INFO:lib389.utils:Enabled ciphers: 28
INFO:lib389.utils:Disabled ciphers: 43
Failed tickets/ticket47838_test.py::test_47838_run_8 2.29
topology_st = <lib389.topologies.TopologyMain object at 0x7f0d6d0>

def test_47838_run_8(topology_st):
"""
Check nsSSL3Ciphers: default + allowWeakCipher: off
Strong Default ciphers are enabled.
"""
_header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
(ldap.MOD_REPLACE, 'allowWeakCipher', 'off')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.47838_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
topology_st.standalone.start(timeout=120)

enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())

log.info("Enabled ciphers: %d" % ecount)
log.info("Disabled ciphers: %d" % dcount)
global plus_all_ecount
global plus_all_dcount
if nss_version >= NSS323:
> assert ecount == 29
E assert 28 == 29

tickets/ticket47838_test.py:492: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Restarting the server ######################
INFO:lib389.utils:Enabled ciphers: 28
INFO:lib389.utils:Disabled ciphers: 43
Failed tickets/ticket47838_test.py::test_47838_run_9 2.24
topology_st = <lib389.topologies.TopologyMain object at 0x7f0d6d0>

def test_47838_run_9(topology_st):
"""
Check no nsSSL3Ciphers
Default ciphers are enabled.
allowWeakCipher: on
nsslapd-errorlog-level: 0
"""
_header(topology_st,
'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None),
(ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.47838_8' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
topology_st.standalone.start(timeout=120)

enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())

log.info("Enabled ciphers: %d" % ecount)
log.info("Disabled ciphers: %d" % dcount)
if nss_version >= NSS327:
> assert ecount == 34
E assert 33 == 34

tickets/ticket47838_test.py:535: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Restarting the server ######################
INFO:lib389.utils:Enabled ciphers: 33
INFO:lib389.utils:Disabled ciphers: 0
Failed tickets/ticket47838_test.py::test_47838_run_10 2.21
topology_st = <lib389.topologies.TopologyMain object at 0x7f0d6d0>

def test_47838_run_10(topology_st):
"""
Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,
+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,
+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,
+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,
-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,
-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,
-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5
allowWeakCipher: on
nsslapd-errorlog-level: 0
"""
_header(topology_st,
'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on')

topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
'-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])

log.info("\n######################### Restarting the server ######################\n")
topology_st.standalone.stop(timeout=10)
os.system('mv %s %s.47838_9' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
os.system('touch %s' % (topology_st.standalone.errlog))
time.sleep(1)
topology_st.standalone.start(timeout=120)

enabled = os.popen('egrep "SSL info:" %s | egrep \": enabled\" | wc -l' % topology_st.standalone.errlog)
disabled = os.popen('egrep "SSL info:" %s | egrep \": disabled\" | wc -l' % topology_st.standalone.errlog)
ecount = int(enabled.readline().rstrip())
dcount = int(disabled.readline().rstrip())

log.info("Enabled ciphers: %d" % ecount)
log.info("Disabled ciphers: %d" % dcount)
global plus_all_ecount
global plus_all_dcount
> assert ecount == 9
E assert 3 == 9

tickets/ticket47838_test.py:591: AssertionError
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Restarting the server ######################
INFO:lib389.utils:Enabled ciphers: 3
INFO:lib389.utils:Disabled ciphers: 0
Failed tickets/ticket47900_test.py::test_ticket47900 0.04
topology_st = <lib389.topologies.TopologyMain object at 0x832a950>

def test_ticket47900(topology_st):
"""
Test that password administrators/root DN can
bypass password syntax/policy.

We need to test how passwords are modified in
existing entries, and when adding new entries.

Create the Password Admin entry, but do not set
it as an admin yet. Use the entry to verify invalid
passwords are caught. Then activate the password
admin and make sure it can bypass password policy.
"""

# Prepare the Password Administator
entry = Entry(ADMIN_DN)
entry.setValues('objectclass', 'top', 'person')
entry.setValues('sn', ADMIN_NAME)
entry.setValues('cn', ADMIN_NAME)
entry.setValues('userpassword', ADMIN_PWD)

topology_st.standalone.log.info("Creating Password Administator entry %s..." % ADMIN_DN)
try:
topology_st.standalone.add_s(entry)
except ldap.LDAPError as e:
topology_st.standalone.log.error('Unexpected result ' + e.message['desc'])
assert False
topology_st.standalone.log.error("Failed to add Password Administator %s, error: %s "
% (ADMIN_DN, e.message['desc']))
assert False

topology_st.standalone.log.info("Configuring password policy...")
try:
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
(ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
(ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
(ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
(ldap.MOD_REPLACE, 'passwordExp', 'on'),
(ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
(ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
except ldap.LDAPError as e:
topology_st.standalone.log.error('Failed configure password policy: ' + e.message['desc'])
assert False

#
# Add an aci to allow everyone all access (just makes things easier)
#
topology_st.standalone.log.info("Add aci to allow password admin to add/update entries...")

ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
ACI_TARGETATTR = "(targetattr = *)"
ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
try:
topology_st.standalone.modify_s(SUFFIX, mod)
except ldap.LDAPError as e:
topology_st.standalone.log.error('Failed to add aci for password admin: ' + e.message['desc'])
assert False

#
# Bind as the Password Admin
#
topology_st.standalone.log.info("Bind as the Password Administator (before activating)...")
try:
topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
except ldap.LDAPError as e:
topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
> assert False
E assert False

tickets/ticket47900_test.py:97: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389:Creating Password Administator entry cn=passwd_admin,dc=example,dc=com...
INFO:lib389:Configuring password policy...
INFO:lib389:Add aci to allow password admin to add/update entries...
INFO:lib389:Bind as the Password Administator (before activating)...
ERROR:lib389:Failed to bind as the Password Admin: Can't contact LDAP server
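
Note: this failure, like most that follow, is an ldap.SERVER_DOWN ("Can't contact LDAP server") rather than a password-policy or ACI error. A small sketch (not from the suite; the URI and password are placeholders) that distinguishes the two situations before asserting:

    import ldap

    def try_bind(uri, dn, password):
        """Return 'ok', 'bad-credentials', or 'server-down' for a simple bind."""
        conn = ldap.initialize(uri)
        try:
            conn.simple_bind_s(dn, password)
            return 'ok'
        except ldap.INVALID_CREDENTIALS:
            return 'bad-credentials'
        except ldap.SERVER_DOWN:
            # The instance is not listening; the environment, not the
            # password-admin feature, is at fault.
            return 'server-down'

    # e.g. try_bind('ldap://localhost:389', 'cn=passwd_admin,dc=example,dc=com', 'secret')
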
Failed tickets/ticket47950_test.py::test_ticket47950 0.04
topology_st = <lib389.topologies.TopologyMain object at 0x896a590>

def test_ticket47950(topology_st):
"""
Testing nsslapd-plugin-binddn-tracking does not cause issues around
access control and reconfiguring replication/repl agmt.
"""

log.info('Testing Ticket 47950 - Testing nsslapd-plugin-binddn-tracking')

#
# Turn on bind dn tracking
#
try:
topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-plugin-binddn-tracking', 'on')])
log.info('nsslapd-plugin-binddn-tracking enabled.')
except ldap.LDAPError as e:
log.error('Failed to enable bind dn tracking: ' + e.message['desc'])
assert False

#
# Add two users
#
try:
topology_st.standalone.add_s(Entry((USER1_DN, {
'objectclass': "top person inetuser".split(),
'userpassword': "password",
'sn': "1",
'cn': "user 1"})))
log.info('Added test user %s' % USER1_DN)
except ldap.LDAPError as e:
log.error('Failed to add %s: %s' % (USER1_DN, e.message['desc']))
assert False

try:
topology_st.standalone.add_s(Entry((USER2_DN, {
'objectclass': "top person inetuser".split(),
'sn': "2",
'cn': "user 2"})))
log.info('Added test user %s' % USER2_DN)
except ldap.LDAPError as e:
log.error('Failed to add user1: ' + e.message['desc'])
assert False

#
# Add an aci
#
try:
acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \
';allow (all) (userdn = "ldap:///%s");)' % USER1_DN

topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
log.info('Added aci')
except ldap.LDAPError as e:
log.error('Failed to add aci: ' + e.message['desc'])
assert False

#
# Make modification as user
#
try:
topology_st.standalone.simple_bind_s(USER1_DN, "password")
log.info('Bind as user %s successful' % USER1_DN)
except ldap.LDAPError as e:
log.error('Failed to bind as user1: ' + e.message['desc'])
> assert False
E assert False

tickets/ticket47950_test.py:84: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47950_test:Testing Ticket 47950 - Testing nsslapd-plugin-binddn-tracking
INFO:tests.tickets.ticket47950_test:nsslapd-plugin-binddn-tracking enabled.
INFO:tests.tickets.ticket47950_test:Added test user uid=user1,dc=example,dc=com
INFO:tests.tickets.ticket47950_test:Added test user uid=user2,dc=example,dc=com
INFO:tests.tickets.ticket47950_test:Added aci
ERROR:tests.tickets.ticket47950_test:Failed to bind as user1: Can't contact LDAP server
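
Note: the test flips nsslapd-plugin-binddn-tracking in cn=config before the bind fails. A quick sanity check of that switch, in the same lib389 connection style as the test (sketch, not from the suite):

    config = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE, '(objectclass=*)')
    # Entry.getValue() is the same lib389 accessor used by ticket48896 below.
    assert config[0].getValue('nsslapd-plugin-binddn-tracking') == 'on'
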
Failed tickets/ticket47980_test.py::test_ticket47980 0.05
topology_st = <lib389.topologies.TopologyMain object at 0x88eff10>

def test_ticket47980(topology_st):
"""
Multiple COS pointer definitions that use the same attribute are not correctly ordered.
The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead
to the wrong cos attribute value being applied to the entry.
"""

log.info('Testing Ticket 47980 - Testing multiple nested COS pointer definitions are processed correctly')

# Add our nested branches
try:
topology_st.standalone.add_s(Entry((BRANCH1, {
'objectclass': 'top extensibleObject'.split(),
'ou': 'level1'
})))
except ldap.LDAPError as e:
log.error('Failed to add level1: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((BRANCH2, {
'objectclass': 'top extensibleObject'.split(),
'ou': 'level2'
})))
except ldap.LDAPError as e:
log.error('Failed to add level2: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((BRANCH3, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'level3'
})))
except ldap.LDAPError as e:
log.error('Failed to add level3: error ' + e.message['desc'])
assert False

# People branch, might already exist
try:
topology_st.standalone.add_s(Entry((BRANCH4, {
'objectclass': 'top extensibleObject'.split(),
'ou': 'level4'
})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
log.error('Failed to add level4: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((BRANCH5, {
'objectclass': 'top extensibleObject'.split(),
'ou': 'level5'
})))
except ldap.LDAPError as e:
log.error('Failed to add level5: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((BRANCH6, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'level6'
})))
except ldap.LDAPError as e:
log.error('Failed to add level6: error ' + e.message['desc'])
assert False

# Add users to each branch
try:
topology_st.standalone.add_s(Entry((USER1_DN, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'user1'
})))
except ldap.LDAPError as e:
log.error('Failed to add user1: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((USER2_DN, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'user2'
})))
except ldap.LDAPError as e:
log.error('Failed to add user2: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((USER3_DN, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'user3'
})))
except ldap.LDAPError as e:
log.error('Failed to add user3: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((USER4_DN, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'user4'
})))
except ldap.LDAPError as e:
log.error('Failed to add user4: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((USER5_DN, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'user5'
})))
except ldap.LDAPError as e:
log.error('Failed to add user5: error ' + e.message['desc'])
assert False

try:
topology_st.standalone.add_s(Entry((USER6_DN, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'user6'
})))
except ldap.LDAPError as e:
log.error('Failed to add user6: error ' + e.message['desc'])
assert False

# Enable password policy
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
except ldap.LDAPError as e:
log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
assert False

#
# Add subtree policy to branch 1
#
# Add the container
try:
topology_st.standalone.add_s(Entry((BRANCH1_CONTAINER, {
'objectclass': 'top nsContainer'.split(),
'cn': 'nsPwPolicyContainer'
})))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for level1: error ' + e.message['desc'])
> assert False
E assert False

tickets/ticket47980_test.py:214: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47980_test:Testing Ticket 47980 - Testing multiple nested COS pointer definitions are processed correctly
ERROR:tests.tickets.ticket47980_test:Failed to add subtree container for level1: error Can't contact LDAP server
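
Note: the long series of add_s/except blocks above differs only in the DN, the attributes, and whether ALREADY_EXISTS is tolerated (the "People branch" case). A small helper sketch that factors the pattern out (illustrative, not from the suite):

    import ldap
    from lib389 import Entry

    def add_entry(conn, dn, attrs, may_exist=False):
        """Add an entry, optionally tolerating ldap.ALREADY_EXISTS."""
        try:
            conn.add_s(Entry((dn, attrs)))
        except ldap.ALREADY_EXISTS:
            if not may_exist:
                raise

    # e.g. add_entry(topology_st.standalone, BRANCH4,
    #                {'objectclass': 'top extensibleObject'.split(), 'ou': 'level4'},
    #                may_exist=True)
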
Failed tickets/ticket47981_test.py::test_ticket47981 0.08
topology_st = <lib389.topologies.TopologyMain object at 0x8ad66d0>

def test_ticket47981(topology_st):
"""
If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
while other suffixes do, then the vattr cache is not invalidated as it should be. Then any
cached entries will still contain the old COS attributes/values.
"""

log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users')

#
# Create a second backend that does not have any COS entries
#
log.info('Adding second suffix that will not contain any COS entries...\n')

topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
try:
topology_st.standalone.add_s(Entry((SECOND_SUFFIX, {
'objectclass': 'top organization'.split(),
'o': BE_NAME})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
log.error('Failed to create suffix entry: error ' + e.message['desc'])
assert False

#
# Add People branch, it might already exist
#
log.info('Add our test entries to the default suffix, and proceed with the test...')

try:
topology_st.standalone.add_s(Entry((BRANCH, {
'objectclass': 'top extensibleObject'.split(),
'ou': 'level4'
})))
except ldap.ALREADY_EXISTS:
pass
except ldap.LDAPError as e:
log.error('Failed to add ou=people: error ' + e.message['desc'])
assert False

#
# Add a user to the branch
#
try:
topology_st.standalone.add_s(Entry((USER_DN, {
'objectclass': 'top extensibleObject'.split(),
'uid': 'user1'
})))
except ldap.LDAPError as e:
log.error('Failed to add user1: error ' + e.message['desc'])
assert False

#
# Enable password policy and add the subtree policy
#
try:
topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
except ldap.LDAPError as e:
log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
assert False

> addSubtreePwPolicy(topology_st.standalone)

tickets/ticket47981_test.py:176:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

inst = <lib389.DirSrv object at 0x8ad6790>

def addSubtreePwPolicy(inst):
#
# Add subtree policy to the people branch
#
try:
inst.add_s(Entry((BRANCH_CONTAINER, {
'objectclass': 'top nsContainer'.split(),
'cn': 'nsPwPolicyContainer'
})))
except ldap.LDAPError as e:
log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
> assert False
E assert False

tickets/ticket47981_test.py:41: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47981_test:Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users
INFO:tests.tickets.ticket47981_test:Adding second suffix that will not contain any COS entries...
INFO:lib389:List backend with suffix=o=netscaperoot
INFO:lib389:Creating a local backend
INFO:lib389:List backend cn=netscaperoot,cn=ldbm database,cn=plugins,cn=config
INFO:lib389:Found entry dn: cn=netscaperoot,cn=ldbm database,cn=plugins,cn=config cn: netscaperoot nsslapd-cachememsize: 512000 nsslapd-cachesize: -1 nsslapd-directory: /var/lib/dirsrv/slapd-standalone_1/db/netscaperoot nsslapd-dncachememsize: 16777216 nsslapd-readonly: off nsslapd-require-index: off nsslapd-suffix: o=netscaperoot objectClass: top objectClass: extensibleObject objectClass: nsBackendInstance
INFO:lib389:Entry dn: cn="o=netscaperoot",cn=mapping tree,cn=config cn: o=netscaperoot nsslapd-backend: netscaperoot nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree
INFO:lib389:Found entry dn: cn=o\3Dnetscaperoot,cn=mapping tree,cn=config cn: o=netscaperoot nsslapd-backend: netscaperoot nsslapd-state: backend objectClass: top objectClass: extensibleObject objectClass: nsMappingTree
INFO:tests.tickets.ticket47981_test:Add our test entries to the default suffix, and proceed with the test...
ERROR:tests.tickets.ticket47981_test:Failed to add subtree container for ou=people: error Can't contact LDAP server
Failed tickets/ticket48228_test.py::test_ticket48228_test_global_policy 0.05
topology_st = <lib389.topologies.TopologyMain object at 0xf040850>

def test_ticket48228_test_global_policy(topology_st):
"""
Check global password policy
"""

log.info(' Set inhistory = 6')
set_global_pwpolicy(topology_st, 6)

log.info(' Bind as directory manager')
log.info("Bind as %s" % DN_DM)
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

log.info(' Add an entry' + USER1_DN)
try:
topology_st.standalone.add_s(
Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
'sn': '1',
'cn': 'user 1',
'uid': 'user1',
'givenname': 'user',
'mail': 'user1@example.com',
'userpassword': 'password'})))
except ldap.LDAPError as e:
log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc'])
> assert False
E assert False

tickets/ticket48228_test.py:162: AssertionError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48228_test: Set inhistory = 6
INFO:tests.tickets.ticket48228_test: +++++ Enable global password policy +++++
INFO:tests.tickets.ticket48228_test: Set global password history on
INFO:tests.tickets.ticket48228_test: Set global passwords in history
INFO:tests.tickets.ticket48228_test: Bind as directory manager
INFO:tests.tickets.ticket48228_test:Bind as cn=Directory Manager
INFO:tests.tickets.ticket48228_test: Add an entryuid=user1,dc=example,dc=com
CRITICAL:tests.tickets.ticket48228_test:test_ticket48228: Failed to add useruid=user1,dc=example,dc=com: error Can't contact LDAP server
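
Note: per the captured log, set_global_pwpolicy() turns on global password history and sets the history depth before the add is attempted. A hedged sketch of the equivalent cn=config modification (the attribute names are the standard 389-ds ones, assumed here rather than read from the helper's source):

    topology_st.standalone.modify_s('cn=config',
                                    [(ldap.MOD_REPLACE, 'passwordHistory', 'on'),
                                     (ldap.MOD_REPLACE, 'passwordInHistory', '6')])
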
Failed tickets/ticket48228_test.py::test_ticket48228_test_subtree_policy 0.00
topology_st = <lib389.topologies.TopologyMain object at 0xf040850>

def test_ticket48228_test_subtree_policy(topology_st):
"""
Check subtree level password policy
"""

log.info(' Set inhistory = 6')
> set_subtree_pwpolicy(topology_st)

tickets/ticket48228_test.py:203:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tickets/ticket48228_test.py:57: in set_subtree_pwpolicy
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xf05d450>
func = <built-in method simple_bind of LDAP object at 0x1e44260>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48228_test: Set inhistory = 6
INFO:tests.tickets.ticket48228_test: +++++ Enable subtree level password policy +++++
Failed tickets/ticket48234_test.py::test_ticket48234 0.03
topology_st = <lib389.topologies.TopologyMain object at 0xf064bd0>

def test_ticket48234(topology_st):
"""
Test aci which contains an extensible filter.
shutdown
"""

log.info('Bind as root DN')
try:
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
except ldap.LDAPError as e:
topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
assert False

ouname = 'outest'
username = 'admin'
passwd = 'Password'
deniedattr = 'telephonenumber'
log.info('Add aci which contains extensible filter.')
aci_text = ('(targetattr = "%s")' % (deniedattr) +
'(target = "ldap:///%s")' % (DEFAULT_SUFFIX) +
'(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' +
'(userdn = "ldap:///%s??sub?(&(cn=%s)(ou:dn:=%s))");)' % (DEFAULT_SUFFIX, username, ouname))

try:
topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
except ldap.LDAPError as e:
log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc']))
assert False

log.info('Add entries ...')
for idx in range(0, 2):
ou0 = 'OU%d' % idx
log.info('adding %s under %s...' % (ou0, DEFAULT_SUFFIX))
add_ou_entry(topology_st.standalone, ou0, DEFAULT_SUFFIX)
parent = 'ou=%s,%s' % (ou0, DEFAULT_SUFFIX)
log.info('adding %s under %s...' % (ouname, parent))
add_ou_entry(topology_st.standalone, ouname, parent)

for idx in range(0, 2):
parent = 'ou=%s,ou=OU%d,%s' % (ouname, idx, DEFAULT_SUFFIX)
log.info('adding %s under %s...' % (username, parent))
add_user_entry(topology_st.standalone, username, passwd, parent)

binddn = 'cn=%s,%s' % (username, parent)
log.info('Bind as user %s' % binddn)
try:
topology_st.standalone.simple_bind_s(binddn, passwd)
except ldap.LDAPError as e:
> topology_st.standalone.log.error(bindn + ' failed to authenticate: ' + e.message['desc'])
E NameError: global name 'bindn' is not defined

tickets/ticket48234_test.py:73: NameError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48234_test:Bind as root DN
INFO:tests.tickets.ticket48234_test:Add aci which contains extensible filter.
INFO:tests.tickets.ticket48234_test:Add entries ...
INFO:tests.tickets.ticket48234_test:adding OU0 under dc=example,dc=com...
INFO:tests.tickets.ticket48234_test:adding outest under ou=OU0,dc=example,dc=com...
INFO:tests.tickets.ticket48234_test:adding OU1 under dc=example,dc=com...
INFO:tests.tickets.ticket48234_test:adding outest under ou=OU1,dc=example,dc=com...
INFO:tests.tickets.ticket48234_test:adding admin under ou=outest,ou=OU0,dc=example,dc=com...
INFO:tests.tickets.ticket48234_test:adding admin under ou=outest,ou=OU1,dc=example,dc=com...
INFO:tests.tickets.ticket48234_test:Bind as user cn=admin,ou=outest,ou=OU1,dc=example,dc=com
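
Note: unlike the surrounding failures, this NameError is a test bug independent of the server state: the handler at tickets/ticket48234_test.py:73 references bindn, while the variable defined a few lines earlier is binddn. A corrected handler (sketch; only the variable name changes, and the trailing assert mirrors the suite's other handlers):

    try:
        topology_st.standalone.simple_bind_s(binddn, passwd)
    except ldap.LDAPError as e:
        topology_st.standalone.log.error(binddn + ' failed to authenticate: ' + e.message['desc'])
        assert False
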
Failed tickets/ticket48272_test.py::test_ticket48272 2.21
topology_st = <lib389.topologies.TopologyMain object at 0xe6b8790>

def test_ticket48272(topology_st):
"""
Test the functionality of the addn bind plugin. This should allow users
of the type "name" or "name@domain.com" to bind.
"""

# There will be a better way to do this in the future.
topology_st.standalone.add_s(Entry((
"cn=addn,cn=plugins,cn=config", {
"objectClass": "top nsSlapdPlugin extensibleObject".split(),
"cn": "addn",
"nsslapd-pluginPath": "libaddn-plugin",
"nsslapd-pluginInitfunc": "addn_init",
"nsslapd-pluginType": "preoperation",
"nsslapd-pluginEnabled": "on",
"nsslapd-pluginId": "addn",
"nsslapd-pluginVendor": "389 Project",
"nsslapd-pluginVersion": "1.3.6.0",
"nsslapd-pluginDescription": "Allow AD DN style bind names to LDAP",
"addn_default_domain": "example.com",
}
)))

topology_st.standalone.add_s(Entry((
"cn=example.com,cn=addn,cn=plugins,cn=config", {
"objectClass": "top extensibleObject".split(),
"cn": "example.com",
"addn_base": "ou=People,%s" % DEFAULT_SUFFIX,
"addn_filter": "(&(objectClass=account)(uid=%s))",
}
)))

topology_st.standalone.restart(60)

# Add a user
_create_user(topology_st.standalone, USER1, USER1_DN)

if DEBUGGING is not False:
print("Attach now")
time.sleep(20)

# Make sure our binds still work.
> assert (_bind(USER1_DN, PW))

tickets/ticket48272_test.py:90:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tickets/ticket48272_test.py:41: in _bind
conn.simple_bind_s(name, cred)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <ldap.ldapobject.SimpleLDAPObject instance at 0x34e32d8>
func = <built-in method result4 of LDAP object at 0x2943c88>
args = (1, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
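
Note: for reference, once the addn plugin entries above are in place and the instance restarts cleanly, the bind under test is an ordinary simple bind whose name or name@domain identity is resolved through addn_filter under addn_base. A hedged sketch with plain python-ldap (URI and password are illustrative placeholders):

    import ldap

    conn = ldap.initialize('ldap://localhost:389')
    try:
        # With the plugin enabled, 'user1@example.com' is mapped to the matching
        # (objectClass=account)(uid=user1) entry under ou=People before binding.
        conn.simple_bind_s('user1@example.com', 'password')
    finally:
        conn.unbind_s()
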
Failed tickets/ticket48366_test.py::test_ticket48366_search_user 0.02
topology_st = <lib389.topologies.TopologyMain object at 0xe712d90>

def test_ticket48366_search_user(topology_st):
proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: " + TEST_USER_DN)
# searching as test user should return one entry from the green subtree
> topology_st.standalone.simple_bind_s(TEST_USER_DN, PASSWORD)

tickets/ticket48366_test.py:112:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe712c90>
func = <built-in method result4 of LDAP object at 0x29026c0>
args = (13, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
Failed tickets/ticket48366_test.py::test_ticket48366_search_dm 0.00
topology_st = <lib389.topologies.TopologyMain object at 0xe712d90>

def test_ticket48366_search_dm(topology_st):
# searching as directory manager should return one entries from both subtrees
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

tickets/ticket48366_test.py:128:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe712c90>
func = <built-in method simple_bind of LDAP object at 0x29026c0>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
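
Note: the ProxyAuthzControl built at the top of these two tests is never exercised because the bind itself fails. For context, the control is passed as a server control on the search; a minimal sketch (not from the suite; the base DN, identity, and credentials are placeholders, and the import path assumes python-ldap's ldap.controls.simple module):

    import ldap
    from ldap.controls.simple import ProxyAuthzControl

    conn = ldap.initialize('ldap://localhost:389')
    conn.simple_bind_s('cn=Directory Manager', 'password')
    proxy_ctrl = ProxyAuthzControl(criticality=True,
                                   authzId='dn: uid=proxy_user,dc=example,dc=com')
    # The search is evaluated with the authorization identity carried by the control.
    results = conn.search_ext_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
                                '(objectClass=*)', serverctrls=[proxy_ctrl])
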
Failed tickets/ticket48808_test.py::test_ticket48808 0.14
topology_st = <lib389.topologies.TopologyMain object at 0xf902c10>
test_user = None

def test_ticket48808(topology_st, test_user):
log.info('Run multiple paging controls on a single connection')
users_num = 100
page_size = 30
users_list = add_users(topology_st, users_num)
search_flt = r'(uid=test*)'
searchreq_attrlist = ['dn', 'sn']

log.info('Set user bind')
> topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD)

tickets/ticket48808_test.py:163:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xf9029d0>
func = <built-in method result4 of LDAP object at 0x2b43288>
args = (103, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48808_test:Run multiple paging controls on a single connection
INFO:tests.tickets.ticket48808_test:Adding 100 users
INFO:tests.tickets.ticket48808_test:Set user bind
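
Note: the paging loop this test is meant to exercise never runs because the user bind fails. For reference, the pattern with python-ldap's SimplePagedResultsControl looks like the following sketch (base DN, filter, and page size mirror the logged values but are assumptions here):

    import ldap
    from ldap.controls import SimplePagedResultsControl

    def paged_search(conn, base, filterstr, page_size=30):
        """Collect all entries matching filterstr, page_size entries at a time."""
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        entries = []
        while True:
            msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr,
                                    serverctrls=[req_ctrl])
            rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
            entries.extend(rdata)
            pctrls = [c for c in rctrls
                      if c.controlType == SimplePagedResultsControl.controlType]
            if not pctrls or not pctrls[0].cookie:
                break
            req_ctrl.cookie = pctrls[0].cookie
        return entries

    # e.g. paged_search(topology_st.standalone, 'dc=example,dc=com', '(uid=test*)')
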
Failed tickets/ticket48896_test.py::test_ticket48896 0.04
topology_st = <lib389.topologies.TopologyMain object at 0x8fb3b10>

def test_ticket48896(topology_st):
"""
"""
log.info('Testing Ticket 48896 - Default Setting for passwordMinTokenLength does not work')

log.info("Setting global password policy with password syntax.")
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])

config = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, 'cn=*')
mintokenlen = config[0].getValue('passwordMinTokenLength')
history = config[0].getValue('passwordInHistory')

log.info('Default passwordMinTokenLength == %s' % mintokenlen)
log.info('Default passwordInHistory == %s' % history)

log.info('Adding a user.')
curpw = 'password'
topology_st.standalone.add_s(Entry((TESTDN,
{'objectclass': "top person organizationalPerson inetOrgPerson".split(),
'cn': 'test user',
'sn': 'user',
> 'userPassword': curpw})))

tickets/ticket48896_test.py:86:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:157: in inner
return f(ent.dn, ent.toTupleList(), *args[2:])
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:210: in add_s
return self.result(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:129: in inner
objtype, data = f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:503: in result
resp_type, resp_data, resp_msgid = self.result2(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:507: in result2
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all,timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0x8fb3ed0>
func = <built-in method result4 of LDAP object at 0x34b2ad0>
args = (5, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48896_test:Testing Ticket 48896 - Default Setting for passwordMinTokenLength does not work
INFO:tests.tickets.ticket48896_test:Setting global password policy with password syntax.
INFO:tests.tickets.ticket48896_test:Default passwordMinTokenLength == 3
INFO:tests.tickets.ticket48896_test:Default passwordInHistory == 6
INFO:tests.tickets.ticket48896_test:Adding a user.
Failed tickets/ticket48956_test.py::test_ticket48956 2.15
topology_st = <lib389.topologies.TopologyMain object at 0x8fa9390>

def test_ticket48956(topology_st):
"""Write your testcase here...

Also, if you need any testcase initialization,
please, write additional fixture for that(include finalizer).

"""

topology_st.standalone.modify_s(ACCT_POLICY_PLUGIN_DN,
[(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ACCT_POLICY_CONFIG_DN)])

topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
(ldap.MOD_REPLACE, 'limitattrname',
'accountInactivityLimit')])

# Enable the plugins
topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)

> topology_st.standalone.restart(timeout=10)

tickets/ticket48956_test.py:94:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1258: in restart
self.start(timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:1141: in start
"dirsrv@%s" % self.serverid])
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

popenargs = (['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1'],)
kwargs = {}, retcode = 1
cmd = ['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']

def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.

The arguments are the same as for the Popen constructor. Example:

check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
> raise CalledProcessError(retcode, cmd)
E CalledProcessError: Command '['/usr/bin/systemctl', 'start', 'dirsrv@standalone_1']' returned non-zero exit status 1

/usr/lib64/python2.7/subprocess.py:542: CalledProcessError
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
Job for dirsrv@standalone_1.service failed because a fatal signal was delivered to the control process. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
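
Note: this is the point where the instance fails to start under systemd, which is consistent with the "Can't contact LDAP server" failures seen elsewhere in this run. A small diagnostic sketch (not part of lib389) that captures the unit status and recent journal output the systemd message points to:

    import subprocess

    def collect_dirsrv_diagnostics(serverid='standalone_1', lines=50):
        """Print systemctl status and recent journal entries for a dirsrv unit."""
        unit = 'dirsrv@%s.service' % serverid
        for cmd in (['systemctl', 'status', unit, '--no-pager'],
                    ['journalctl', '-u', unit, '-n', str(lines), '--no-pager']):
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            out, _ = proc.communicate()
            print('$ ' + ' '.join(cmd))
            print(out)
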
Failed tickets/ticket548_test.py::test_ticket548_test_with_no_policy 0.03
topology_st = <lib389.topologies.TopologyMain object at 0xe82ec50>

def test_ticket548_test_with_no_policy(topology_st):
"""
Check shadowAccount under no password policy
"""
log.info("Case 1. No password policy")

log.info("Bind as %s" % DN_DM)
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

log.info('Add an entry' + USER1_DN)
try:
topology_st.standalone.add_s(
Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(),
'sn': '1',
'cn': 'user 1',
'uid': 'user1',
'givenname': 'user',
'mail': 'user1@' + DEFAULT_SUFFIX,
'userpassword': USER_PW})))
except ldap.LDAPError as e:
log.fatal('test_ticket548: Failed to add user' + USER1_DN + ': error ' + e.message['desc'])
assert False

edate = int(time.time() / (60 * 60 * 24))
log.info('Search entry %s' % USER1_DN)

log.info("Bind as %s" % USER1_DN)
> topology_st.standalone.simple_bind_s(USER1_DN, USER_PW)

tickets/ticket548_test.py:201:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:223: in simple_bind_s
resp_type, resp_data, resp_msgid, resp_ctrls = self.result3(msgid,all=1,timeout=self.timeout)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:514: in result3
resp_ctrl_classes=resp_ctrl_classes
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:521: in result4
ldap_result = self._ldap_call(self._l.result4,msgid,all,timeout,add_ctrls,add_intermediates,add_extop)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe80a550>
func = <built-in method result4 of LDAP object at 0x293b350>
args = (4, 1, -1, 0, 0, 0), kwargs = {}, diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket548_test:Case 1. No password policy
INFO:tests.tickets.ticket548_test:Bind as cn=Directory Manager
INFO:tests.tickets.ticket548_test:Add an entryuid=user1,dc=example,dc=com
INFO:tests.tickets.ticket548_test:Search entry uid=user1,dc=example,dc=com
INFO:tests.tickets.ticket548_test:Bind as uid=user1,dc=example,dc=com
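
Note: the ticket548 cases compute their expected shadowAccount values as whole days since the Unix epoch (the edate variable above). A one-line worked example of that arithmetic:

    import time

    # shadow* attributes are expressed in days since 1970-01-01 (UTC), so the
    # expected base value is the current UNIX time divided by 86400 seconds.
    edate = int(time.time() / (60 * 60 * 24))
    # On 15-Mar-2017, the date of this run, this evaluates to 17240.
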
Failed tickets/ticket548_test.py::test_ticket548_test_global_policy 0.00
topology_st = <lib389.topologies.TopologyMain object at 0xe82ec50>

def test_ticket548_test_global_policy(topology_st):
"""
Check shadowAccount with global password policy
"""

log.info("Case 2. Check shadowAccount with global password policy")

log.info("Bind as %s" % DN_DM)
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

tickets/ticket548_test.py:216:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe80a550>
func = <built-in method simple_bind of LDAP object at 0x293b350>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket548_test:Case 2. Check shadowAccount with global password policy
INFO:tests.tickets.ticket548_test:Bind as cn=Directory Manager
Failed tickets/ticket548_test.py::test_ticket548_test_subtree_policy 0.00
topology_st = <lib389.topologies.TopologyMain object at 0xe82ec50>

def test_ticket548_test_subtree_policy(topology_st):
"""
Check shadowAccount with subtree level password policy
"""

log.info("Case 3. Check shadowAccount with subtree level password policy")

log.info("Bind as %s" % DN_DM)
> topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

tickets/ticket548_test.py:307:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:222: in simple_bind_s
msgid = self.simple_bind(who,cred,serverctrls,clientctrls)
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:216: in simple_bind
return self._ldap_call(self._l.simple_bind,who,cred,RequestControlTuples(serverctrls),RequestControlTuples(clientctrls))
/mnt/tests/rhds/tests/upstream/src/lib389/lib389/__init__.py:161: in inner
return f(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <lib389.DirSrv object at 0xe80a550>
func = <built-in method simple_bind of LDAP object at 0x293b350>
args = ('cn=Directory Manager', 'password', None, None), kwargs = {}
diagnostic_message_success = None
e = SERVER_DOWN({'desc': "Can't contact LDAP server"},)

def _ldap_call(self,func,*args,**kwargs):
"""
Wrapper method mainly for serializing calls into OpenLDAP libs
and trace logs
"""
self._ldap_object_lock.acquire()
if __debug__:
if self._trace_level>=1:
self._trace_file.write('*** %s %s - %s\n%s\n' % (
repr(self),
self._uri,
'.'.join((self.__class__.__name__,func.__name__)),
pprint.pformat((args,kwargs))
))
if self._trace_level>=9:
traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
diagnostic_message_success = None
try:
try:
> result = func(*args,**kwargs)
E SERVER_DOWN: {'desc': "Can't contact LDAP server"}

/usr/lib64/python2.7/site-packages/ldap/ldapobject.py:106: SERVER_DOWN
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket548_test:Case 3. Check shadowAccount with subtree level password policy
INFO:tests.tickets.ticket548_test:Bind as cn=Directory Manager
Passed suites/basic/basic_test.py::test_basic_ops 0.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test:Initializing the "basic" test suite
INFO:lib389:Import task import_03152017_041650 for file /var/lib/dirsrv/slapd-standalone_1/ldif/Example.ldif completed successfully
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_ops...
INFO:tests.suites.basic.basic_test:test_basic_ops: PASSED
Passed suites/basic/basic_test.py::test_basic_import_export 76.73
----------------------------- Captured stdout call -----------------------------
OK group dirsrv exists
OK user dirsrv exists
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_import_export... INFO:lib389:Import task import_03152017_041654 for file /var/lib/dirsrv/slapd-standalone_1/ldif/basic_import.ldif completed successfully INFO:lib389:Running script: /usr/sbin/ns-slapd ldif2db -D /etc/dirsrv/slapd-standalone_1 -n userRoot -i /var/lib/dirsrv/slapd-standalone_1/ldif/basic_import.ldif [15/Mar/2017:04:17:29.070741005 -0400] - INFO - dblayer_instance_start - Import is running with nsslapd-db-private-import-mem on; No other process is allowed to access the database [15/Mar/2017:04:17:29.072433959 -0400] - INFO - check_and_set_import_cache - pagesize: 4096, pages: 470916, procpages: 3012 [15/Mar/2017:04:17:29.072955854 -0400] - INFO - check_and_set_import_cache - Import allocates 590216KB import cache. [15/Mar/2017:04:17:29.092013870 -0400] - INFO - import_main_offline - import userRoot: Beginning import job... [15/Mar/2017:04:17:29.093233170 -0400] - INFO - import_main_offline - import userRoot: Index buffering enabled with bucket size 100 [15/Mar/2017:04:17:29.294468296 -0400] - INFO - import_producer - import userRoot: Processing file "/var/lib/dirsrv/slapd-standalone_1/ldif/basic_import.ldif" [15/Mar/2017:04:17:50.295105794 -0400] - INFO - import_monitor_threads - import userRoot: Processed 45220 entries -- average rate 2153.3/sec, recent rate 2153.3/sec, hit ratio 0% [15/Mar/2017:04:17:52.475758507 -0400] - INFO - import_producer - import userRoot: Finished scanning file "/var/lib/dirsrv/slapd-standalone_1/ldif/basic_import.ldif" (50006 entries) [15/Mar/2017:04:17:52.874121019 -0400] - INFO - import_monitor_threads - import userRoot: Workers finished; cleaning up... [15/Mar/2017:04:17:53.075452793 -0400] - INFO - import_monitor_threads - import userRoot: Workers cleaned up. [15/Mar/2017:04:17:53.076267729 -0400] - INFO - import_main_offline - import userRoot: Cleaning up producer thread... [15/Mar/2017:04:17:53.076728323 -0400] - INFO - import_main_offline - import userRoot: Indexing complete. Post-processing... [15/Mar/2017:04:17:53.077801886 -0400] - INFO - import_main_offline - import userRoot: Generating numsubordinates (this may take several minutes to complete)... [15/Mar/2017:04:17:53.081026290 -0400] - INFO - import_main_offline - import userRoot: Generating numSubordinates complete. [15/Mar/2017:04:17:53.082114138 -0400] - INFO - ldbm_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs... [15/Mar/2017:04:17:53.082641932 -0400] - INFO - ldbm_get_nonleaf_ids - import userRoot: Finished gathering ancestorid non-leaf IDs. [15/Mar/2017:04:17:53.084768103 -0400] - INFO - ldbm_ancestorid_new_idl_create_index - import userRoot: Creating ancestorid index (new idl)... [15/Mar/2017:04:17:53.228568822 -0400] - INFO - ldbm_ancestorid_new_idl_create_index - import userRoot: Created ancestorid index (new idl). [15/Mar/2017:04:17:53.229151718 -0400] - INFO - import_main_offline - import userRoot: Flushing caches... [15/Mar/2017:04:17:53.230091099 -0400] - INFO - import_main_offline - import userRoot: Closing files... [15/Mar/2017:04:17:53.743299673 -0400] - INFO - dblayer_pre_close - All database threads now stopped [15/Mar/2017:04:17:53.744152109 -0400] - INFO - import_main_offline - import userRoot: Import complete. Processed 50006 entries in 24 seconds. 
(2083.58 entries/sec) INFO:lib389:Export task export_03152017_041754 for file /var/lib/dirsrv/slapd-standalone_1/ldif/export.ldif completed successfully INFO:lib389:Running script: /usr/sbin/ns-slapd db2ldif -D /etc/dirsrv/slapd-standalone_1 -n userRoot -s dc=example,dc=com -a /var/lib/dirsrv/slapd-standalone_1/ldif/export.ldif ldiffile: /var/lib/dirsrv/slapd-standalone_1/ldif/export.ldif [15/Mar/2017:04:18:02.639212458 -0400] - INFO - export_one_entry - export userRoot: Processed 1000 entries (1%). [15/Mar/2017:04:18:02.730710095 -0400] - INFO - export_one_entry - export userRoot: Processed 2000 entries (3%). [15/Mar/2017:04:18:02.832378691 -0400] - INFO - export_one_entry - export userRoot: Processed 3000 entries (5%). [15/Mar/2017:04:18:02.922890291 -0400] - INFO - export_one_entry - export userRoot: Processed 4000 entries (7%). [15/Mar/2017:04:18:03.009427624 -0400] - INFO - export_one_entry - export userRoot: Processed 5000 entries (9%). [15/Mar/2017:04:18:03.098034810 -0400] - INFO - export_one_entry - export userRoot: Processed 6000 entries (11%). [15/Mar/2017:04:18:03.184200067 -0400] - INFO - export_one_entry - export userRoot: Processed 7000 entries (13%). [15/Mar/2017:04:18:03.267507219 -0400] - INFO - export_one_entry - export userRoot: Processed 8000 entries (15%). [15/Mar/2017:04:18:03.352048904 -0400] - INFO - export_one_entry - export userRoot: Processed 9000 entries (17%). [15/Mar/2017:04:18:03.437867954 -0400] - INFO - export_one_entry - export userRoot: Processed 10000 entries (19%). [15/Mar/2017:04:18:03.524868682 -0400] - INFO - export_one_entry - export userRoot: Processed 11000 entries (21%). [15/Mar/2017:04:18:03.609302412 -0400] - INFO - export_one_entry - export userRoot: Processed 12000 entries (23%). [15/Mar/2017:04:18:03.693524926 -0400] - INFO - export_one_entry - export userRoot: Processed 13000 entries (25%). [15/Mar/2017:04:18:03.777686413 -0400] - INFO - export_one_entry - export userRoot: Processed 14000 entries (27%). [15/Mar/2017:04:18:03.861174161 -0400] - INFO - export_one_entry - export userRoot: Processed 15000 entries (29%). [15/Mar/2017:04:18:03.944529882 -0400] - INFO - export_one_entry - export userRoot: Processed 16000 entries (31%). [15/Mar/2017:04:18:04.029957177 -0400] - INFO - export_one_entry - export userRoot: Processed 17000 entries (33%). [15/Mar/2017:04:18:04.117478080 -0400] - INFO - export_one_entry - export userRoot: Processed 18000 entries (35%). [15/Mar/2017:04:18:04.203985980 -0400] - INFO - export_one_entry - export userRoot: Processed 19000 entries (37%). [15/Mar/2017:04:18:04.290112248 -0400] - INFO - export_one_entry - export userRoot: Processed 20000 entries (39%). [15/Mar/2017:04:18:04.374980761 -0400] - INFO - export_one_entry - export userRoot: Processed 21000 entries (41%). [15/Mar/2017:04:18:04.461577631 -0400] - INFO - export_one_entry - export userRoot: Processed 22000 entries (43%). [15/Mar/2017:04:18:04.548111431 -0400] - INFO - export_one_entry - export userRoot: Processed 23000 entries (45%). [15/Mar/2017:04:18:04.637063629 -0400] - INFO - export_one_entry - export userRoot: Processed 24000 entries (47%). [15/Mar/2017:04:18:04.720931347 -0400] - INFO - export_one_entry - export userRoot: Processed 25000 entries (49%). [15/Mar/2017:04:18:04.805804576 -0400] - INFO - export_one_entry - export userRoot: Processed 26000 entries (51%). [15/Mar/2017:04:18:04.890717237 -0400] - INFO - export_one_entry - export userRoot: Processed 27000 entries (53%). 
[15/Mar/2017:04:18:04.975356482 -0400] - INFO - export_one_entry - export userRoot: Processed 28000 entries (55%). [15/Mar/2017:04:18:05.062602254 -0400] - INFO - export_one_entry - export userRoot: Processed 29000 entries (57%). [15/Mar/2017:04:18:05.152357893 -0400] - INFO - export_one_entry - export userRoot: Processed 30000 entries (59%). [15/Mar/2017:04:18:05.239410595 -0400] - INFO - export_one_entry - export userRoot: Processed 31000 entries (61%). [15/Mar/2017:04:18:05.326656833 -0400] - INFO - export_one_entry - export userRoot: Processed 32000 entries (63%). [15/Mar/2017:04:18:05.410722525 -0400] - INFO - export_one_entry - export userRoot: Processed 33000 entries (65%). [15/Mar/2017:04:18:05.496024871 -0400] - INFO - export_one_entry - export userRoot: Processed 34000 entries (67%). [15/Mar/2017:04:18:05.582004426 -0400] - INFO - export_one_entry - export userRoot: Processed 35000 entries (69%). [15/Mar/2017:04:18:05.667779954 -0400] - INFO - export_one_entry - export userRoot: Processed 36000 entries (71%). [15/Mar/2017:04:18:05.760366169 -0400] - INFO - export_one_entry - export userRoot: Processed 37000 entries (73%). [15/Mar/2017:04:18:05.844956456 -0400] - INFO - export_one_entry - export userRoot: Processed 38000 entries (75%). [15/Mar/2017:04:18:05.931288835 -0400] - INFO - export_one_entry - export userRoot: Processed 39000 entries (77%). [15/Mar/2017:04:18:06.017804884 -0400] - INFO - export_one_entry - export userRoot: Processed 40000 entries (79%). [15/Mar/2017:04:18:06.101344665 -0400] - INFO - export_one_entry - export userRoot: Processed 41000 entries (81%). [15/Mar/2017:04:18:06.187441313 -0400] - INFO - export_one_entry - export userRoot: Processed 42000 entries (83%). [15/Mar/2017:04:18:06.295241569 -0400] - INFO - export_one_entry - export userRoot: Processed 43000 entries (85%). [15/Mar/2017:04:18:06.389422415 -0400] - INFO - export_one_entry - export userRoot: Processed 44000 entries (87%). [15/Mar/2017:04:18:06.478262725 -0400] - INFO - export_one_entry - export userRoot: Processed 45000 entries (89%). [15/Mar/2017:04:18:06.570956818 -0400] - INFO - export_one_entry - export userRoot: Processed 46000 entries (91%). [15/Mar/2017:04:18:06.663020420 -0400] - INFO - export_one_entry - export userRoot: Processed 47000 entries (93%). [15/Mar/2017:04:18:06.753150291 -0400] - INFO - export_one_entry - export userRoot: Processed 48000 entries (95%). [15/Mar/2017:04:18:06.844526364 -0400] - INFO - export_one_entry - export userRoot: Processed 49000 entries (97%). [15/Mar/2017:04:18:06.939179882 -0400] - INFO - export_one_entry - export userRoot: Processed 50000 entries (99%). [15/Mar/2017:04:18:06.940538762 -0400] - INFO - ldbm_back_ldbm2ldif - export userRoot: Processed 50006 entries (100%). [15/Mar/2017:04:18:06.944098970 -0400] - INFO - dblayer_pre_close - All database threads now stopped INFO:lib389:Import task import_03152017_041807 for file /var/lib/dirsrv/slapd-standalone_1/ldif/Example.ldif completed successfully INFO:tests.suites.basic.basic_test:test_basic_import_export: PASSED
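The ldif2db/db2ldif invocations captured above can be reproduced outside the suite with a short sketch; the paths, backend name (userRoot) and suffix are taken verbatim from the log, and running ns-slapd in ldif2db/db2ldif mode assumes the instance has been stopped first:

import subprocess

INST_DIR = '/etc/dirsrv/slapd-standalone_1'
LDIF_DIR = '/var/lib/dirsrv/slapd-standalone_1/ldif'

# Offline import of basic_import.ldif into the userRoot backend
subprocess.check_call(['/usr/sbin/ns-slapd', 'ldif2db', '-D', INST_DIR,
                       '-n', 'userRoot', '-i', LDIF_DIR + '/basic_import.ldif'])

# Offline export of the dc=example,dc=com suffix back to an LDIF file
subprocess.check_call(['/usr/sbin/ns-slapd', 'db2ldif', '-D', INST_DIR,
                       '-n', 'userRoot', '-s', 'dc=example,dc=com',
                       '-a', LDIF_DIR + '/export.ldif'])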
Passed suites/basic/basic_test.py::test_basic_backup 9.73
----------------------------- Captured stdout call -----------------------------
OK group dirsrv exists OK user dirsrv exists Back up directory: /var/lib/dirsrv/slapd-standalone_1/bak/backup_test OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_backup... INFO:lib389:Backup task backup_03152017_041809 completed successfully INFO:lib389:Restore task restore_03152017_041811 completed successfully INFO:lib389:Running script: /usr/sbin/db2bak /var/lib/dirsrv/slapd-standalone_1/bak/backup_test -Z standalone_1 [15/Mar/2017:04:18:16.341703482 -0400] - INFO - ldbm_back_ldbm2archive - /var/lib/dirsrv/slapd-standalone_1/bak/backup_test exists. Renaming to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test.bak [15/Mar/2017:04:18:16.342696108 -0400] - INFO - dblayer_copy_directory - Backing up file 1 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/entryrdn.db) [15/Mar/2017:04:18:16.343265897 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryrdn.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/entryrdn.db [15/Mar/2017:04:18:16.343780354 -0400] - INFO - dblayer_copy_directory - Backing up file 2 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/telephoneNumber.db) [15/Mar/2017:04:18:16.344285210 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/telephoneNumber.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/telephoneNumber.db [15/Mar/2017:04:18:16.344783204 -0400] - INFO - dblayer_copy_directory - Backing up file 3 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/uniquemember.db) [15/Mar/2017:04:18:16.345262774 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uniquemember.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/uniquemember.db [15/Mar/2017:04:18:16.345648581 -0400] - INFO - dblayer_copy_directory - Backing up file 4 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/DBVERSION) [15/Mar/2017:04:18:16.346182500 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/DBVERSION to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/DBVERSION [15/Mar/2017:04:18:16.346672221 -0400] - INFO - dblayer_copy_directory - Backing up file 5 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/cn.db) [15/Mar/2017:04:18:16.347153707 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/cn.db [15/Mar/2017:04:18:16.347642047 -0400] - INFO - dblayer_copy_directory - Backing up file 6 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/ancestorid.db) [15/Mar/2017:04:18:16.348008688 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/ancestorid.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/ancestorid.db [15/Mar/2017:04:18:16.348399237 -0400] - INFO - dblayer_copy_directory - Backing up file 7 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/uid.db) [15/Mar/2017:04:18:16.348756802 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uid.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/uid.db [15/Mar/2017:04:18:16.349181703 -0400] - INFO - dblayer_copy_directory - Backing up file 8 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/entryusn.db) [15/Mar/2017:04:18:16.349645986 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryusn.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/entryusn.db [15/Mar/2017:04:18:16.350067495 -0400] - INFO 
- dblayer_copy_directory - Backing up file 9 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/parentid.db) [15/Mar/2017:04:18:16.350461297 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/parentid.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/parentid.db [15/Mar/2017:04:18:16.350855055 -0400] - INFO - dblayer_copy_directory - Backing up file 10 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/givenName.db) [15/Mar/2017:04:18:16.351264171 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/givenName.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/givenName.db [15/Mar/2017:04:18:16.351777518 -0400] - INFO - dblayer_copy_directory - Backing up file 11 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/nsuniqueid.db) [15/Mar/2017:04:18:16.352168799 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/nsuniqueid.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/nsuniqueid.db [15/Mar/2017:04:18:16.352585286 -0400] - INFO - dblayer_copy_directory - Backing up file 12 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/aci.db) [15/Mar/2017:04:18:16.352965823 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/aci.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/aci.db [15/Mar/2017:04:18:16.353414487 -0400] - INFO - dblayer_copy_directory - Backing up file 13 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/id2entry.db) [15/Mar/2017:04:18:16.353873746 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/id2entry.db [15/Mar/2017:04:18:16.354351413 -0400] - INFO - dblayer_copy_directory - Backing up file 14 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/mail.db) [15/Mar/2017:04:18:16.354734731 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/mail.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/mail.db [15/Mar/2017:04:18:16.355169477 -0400] - INFO - dblayer_copy_directory - Backing up file 15 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/sn.db) [15/Mar/2017:04:18:16.355544586 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/sn.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/sn.db [15/Mar/2017:04:18:16.356020191 -0400] - INFO - dblayer_copy_directory - Backing up file 16 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/objectclass.db) [15/Mar/2017:04:18:16.356401455 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/objectclass.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/objectclass.db [15/Mar/2017:04:18:16.356753156 -0400] - INFO - dblayer_copy_directory - Backing up file 17 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/numsubordinates.db) [15/Mar/2017:04:18:16.357159932 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/userRoot/numsubordinates.db to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/numsubordinates.db [15/Mar/2017:04:18:16.357747180 -0400] - INFO - dblayer_backup - Backing up file 18 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/log.0000000001) [15/Mar/2017:04:18:16.358219729 -0400] - INFO - dblayer_copyfile - Copying 
/var/lib/dirsrv/slapd-standalone_1/db/log.0000000001 to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/log.0000000001 [15/Mar/2017:04:18:16.366663934 -0400] - INFO - dblayer_backup - Backing up file 19 (/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/DBVERSION) [15/Mar/2017:04:18:16.367404790 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/db/DBVERSION to /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/DBVERSION [15/Mar/2017:04:18:16.369489087 -0400] - INFO - dblayer_pre_close - All database threads now stopped INFO:lib389:Running script: /usr/sbin/bak2db /var/lib/dirsrv/slapd-standalone_1/bak/backup_test -Z standalone_1 [15/Mar/2017:04:18:18.704732060 -0400] - INFO - dblayer_delete_transaction_logs - Deleting log file: (/var/lib/dirsrv/slapd-standalone_1/db/log.0000000001) [15/Mar/2017:04:18:18.707145813 -0400] - INFO - dblayer_restore - Restoring file 1 (/var/lib/dirsrv/slapd-standalone_1/db/DBVERSION) [15/Mar/2017:04:18:18.707741823 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/DBVERSION to /var/lib/dirsrv/slapd-standalone_1/db/DBVERSION [15/Mar/2017:04:18:18.708288289 -0400] - INFO - dblayer_copy_directory - Restoring file 2 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryrdn.db) [15/Mar/2017:04:18:18.708796615 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/entryrdn.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryrdn.db [15/Mar/2017:04:18:18.709334474 -0400] - INFO - dblayer_copy_directory - Restoring file 3 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/telephoneNumber.db) [15/Mar/2017:04:18:18.709770056 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/telephoneNumber.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/telephoneNumber.db [15/Mar/2017:04:18:18.710371643 -0400] - INFO - dblayer_copy_directory - Restoring file 4 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/uniquemember.db) [15/Mar/2017:04:18:18.710988695 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/uniquemember.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uniquemember.db [15/Mar/2017:04:18:18.711529025 -0400] - INFO - dblayer_copy_directory - Restoring file 5 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/DBVERSION) [15/Mar/2017:04:18:18.711994261 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/DBVERSION to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/DBVERSION [15/Mar/2017:04:18:18.712446819 -0400] - INFO - dblayer_copy_directory - Restoring file 6 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db) [15/Mar/2017:04:18:18.712931738 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/cn.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db [15/Mar/2017:04:18:18.713514499 -0400] - INFO - dblayer_copy_directory - Restoring file 7 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/ancestorid.db) [15/Mar/2017:04:18:18.714008150 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/ancestorid.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/ancestorid.db [15/Mar/2017:04:18:18.714528075 -0400] - INFO - dblayer_copy_directory - Restoring file 8 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/uid.db) [15/Mar/2017:04:18:18.715000936 -0400] - INFO - dblayer_copyfile - Copying 
/var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/uid.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uid.db [15/Mar/2017:04:18:18.715465721 -0400] - INFO - dblayer_copy_directory - Restoring file 9 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryusn.db) [15/Mar/2017:04:18:18.716008127 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/entryusn.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryusn.db [15/Mar/2017:04:18:18.716536020 -0400] - INFO - dblayer_copy_directory - Restoring file 10 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/parentid.db) [15/Mar/2017:04:18:18.717079184 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/parentid.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/parentid.db [15/Mar/2017:04:18:18.717595065 -0400] - INFO - dblayer_copy_directory - Restoring file 11 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/givenName.db) [15/Mar/2017:04:18:18.718064887 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/givenName.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/givenName.db [15/Mar/2017:04:18:18.718569962 -0400] - INFO - dblayer_copy_directory - Restoring file 12 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/nsuniqueid.db) [15/Mar/2017:04:18:18.719156043 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/nsuniqueid.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/nsuniqueid.db [15/Mar/2017:04:18:18.719696420 -0400] - INFO - dblayer_copy_directory - Restoring file 13 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/aci.db) [15/Mar/2017:04:18:18.720187973 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/aci.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/aci.db [15/Mar/2017:04:18:18.720666716 -0400] - INFO - dblayer_copy_directory - Restoring file 14 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db) [15/Mar/2017:04:18:18.721162474 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/id2entry.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db [15/Mar/2017:04:18:18.721772084 -0400] - INFO - dblayer_copy_directory - Restoring file 15 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/mail.db) [15/Mar/2017:04:18:18.722251706 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/mail.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/mail.db [15/Mar/2017:04:18:18.722851312 -0400] - INFO - dblayer_copy_directory - Restoring file 16 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/sn.db) [15/Mar/2017:04:18:18.723277697 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/sn.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/sn.db [15/Mar/2017:04:18:18.723749546 -0400] - INFO - dblayer_copy_directory - Restoring file 17 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/objectclass.db) [15/Mar/2017:04:18:18.724253289 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/objectclass.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/objectclass.db [15/Mar/2017:04:18:18.724798525 -0400] - INFO - dblayer_copy_directory - Restoring file 18 (/var/lib/dirsrv/slapd-standalone_1/db/userRoot/numsubordinates.db) [15/Mar/2017:04:18:18.725265351 -0400] - INFO - 
dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/userRoot/numsubordinates.db to /var/lib/dirsrv/slapd-standalone_1/db/userRoot/numsubordinates.db [15/Mar/2017:04:18:18.725802080 -0400] - INFO - dblayer_restore - Restoring file 19 (/var/lib/dirsrv/slapd-standalone_1/db/log.0000000001) [15/Mar/2017:04:18:18.726268286 -0400] - INFO - dblayer_copyfile - Copying /var/lib/dirsrv/slapd-standalone_1/bak/backup_test/log.0000000001 to /var/lib/dirsrv/slapd-standalone_1/db/log.0000000001 [15/Mar/2017:04:18:18.764676794 -0400] - INFO - dblayer_pre_close - All database threads now stopped INFO:tests.suites.basic.basic_test:test_basic_backup: PASSED
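The db2bak/bak2db commands in this log can likewise be scripted directly; the backup directory and the -Z instance name below are the ones shown above:

import subprocess

BACKUP_DIR = '/var/lib/dirsrv/slapd-standalone_1/bak/backup_test'

# Archive the userRoot index files plus the transaction log and DBVERSION
subprocess.check_call(['/usr/sbin/db2bak', BACKUP_DIR, '-Z', 'standalone_1'])

# Restore the same archive over the live database files
subprocess.check_call(['/usr/sbin/bak2db', BACKUP_DIR, '-Z', 'standalone_1'])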
Passed suites/basic/basic_test.py::test_basic_systemctl 6.61
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_systemctl... INFO:tests.suites.basic.basic_test:Stopping the server... INFO:tests.suites.basic.basic_test:Stopped the server. INFO:tests.suites.basic.basic_test:Starting the server... INFO:tests.suites.basic.basic_test:Started the server. INFO:tests.suites.basic.basic_test:Stopping the server... INFO:tests.suites.basic.basic_test:Stopped the server before breaking the dse.ldif. INFO:tests.suites.basic.basic_test:Attempting to start the server with broken dse.ldif... Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details. INFO:tests.suites.basic.basic_test:Server failed to start as expected INFO:tests.suites.basic.basic_test:Check the status... INFO:tests.suites.basic.basic_test:Server failed to start as expected INFO:tests.suites.basic.basic_test:Starting the server with good dse.ldif... INFO:tests.suites.basic.basic_test:Check the status... INFO:tests.suites.basic.basic_test:Server started after fixing dse.ldif. INFO:tests.suites.basic.basic_test:test_basic_systemctl: PASSED
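A minimal reproduction of the broken-dse.ldif check is sketched below; it only assumes the systemd unit name shown in the log (dirsrv@standalone_1) and that dse.ldif has already been made unparsable:

import subprocess

UNIT = 'dirsrv@standalone_1.service'

# With a broken dse.ldif the control process exits non-zero, so
# "systemctl start" is expected to fail here.
rc = subprocess.call(['systemctl', 'start', UNIT])
assert rc != 0

# "systemctl is-active" should also report a non-active state.
rc = subprocess.call(['systemctl', 'is-active', '--quiet', UNIT])
assert rc != 0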
Passed suites/basic/basic_test.py::test_basic_ldapagent 5.35
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_ldapagent... INFO:tests.suites.basic.basic_test:snmp ldap agent started INFO:tests.suites.basic.basic_test:Cleanup - killing agent: 3538 INFO:tests.suites.basic.basic_test:test_basic_ldapagent: PASSED
Passed suites/basic/basic_test.py::test_basic_dse 10.29
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test:Running test_basic_dse... INFO:tests.suites.basic.basic_test:dse.ldif was not corrupted, and the server was restarted INFO:tests.suites.basic.basic_test:test_basic_dse: PASSED
Passed suites/basic/basic_test.py::test_def_rootdse_attr[namingContexts] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search hasn't namingContexts attr
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedLDAPVersion] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search hasn't supportedLDAPVersion attr
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedControl] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search hasn't supportedControl attr
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedExtension] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search hasn't supportedExtension attr
Passed suites/basic/basic_test.py::test_def_rootdse_attr[supportedSASLMechanisms] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search hasn't supportedSASLMechanisms attr
Passed suites/basic/basic_test.py::test_def_rootdse_attr[vendorName] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search hasn't vendorName attr
Passed suites/basic/basic_test.py::test_def_rootdse_attr[vendorVersion] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search hasn't vendorVersion attr
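The behaviour these parametrized checks rely on is plain LDAP: a base search of the root DSE that does not explicitly request operational attributes does not return namingContexts, supportedControl, and the other attributes listed above. A minimal sketch (URL and bind are placeholders):

import ldap

conn = ldap.initialize('ldap://localhost:389')  # placeholder URL
conn.simple_bind_s('', '')                      # anonymous bind

dn, attrs = conn.search_s('', ldap.SCOPE_BASE, '(objectclass=*)')[0]

# Operational attributes are not returned unless asked for by name or via '+'
for attr in ('namingContexts', 'supportedControl', 'vendorVersion'):
    assert attr not in attrs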
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[namingContexts] 0.00
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test: Add the nsslapd-return-default-opattr: namingContexts to rootdse ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search has namingContexts attr
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedLDAPVersion] 0.00
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test: Add the nsslapd-return-default-opattr: supportedLDAPVersion to rootdse ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search has supportedLDAPVersion attr
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedControl] 0.00
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test: Add the nsslapd-return-default-opattr: supportedControl to rootdse ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search has supportedControl attr
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedExtension] 0.00
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test: Add the nsslapd-return-default-opattr: supportedExtension to rootdse ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search has supportedExtension attr
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[supportedSASLMechanisms] 0.00
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test: Add the nsslapd-return-default-opattr: supportedSASLMechanisms to rootdse ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search has supportedSASLMechanisms attr
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[vendorName] 0.00
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test: Add the nsslapd-return-default-opattr: vendorName to rootdse ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search has vendorName attr
Passed suites/basic/basic_test.py::test_mod_def_rootdse_attr[vendorVersion] 0.00
---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.basic.basic_test: Add the nsslapd-return-default-opattr: vendorVersion to rootdse ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.basic.basic_test: Assert rootdse search has vendorVersion attr
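The setup step logged for each of these cases adds nsslapd-return-default-opattr to the root DSE, after which the same default search does return the attribute. A sketch of that modify/search pair, assuming directory manager credentials as placeholders:

import ldap

conn = ldap.initialize('ldap://localhost:389')              # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')      # placeholder credentials

# Ask the server to return namingContexts by default on root DSE searches
conn.modify_s('', [(ldap.MOD_ADD, 'nsslapd-return-default-opattr', ['namingContexts'])])

dn, attrs = conn.search_s('', ldap.SCOPE_BASE, '(objectclass=*)')[0]
assert 'namingContexts' in attrs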
Passed suites/betxns/betxn_test.py::test_betxn_init 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed suites/betxns/betxn_test.py::test_betxt_7bit 0.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.betxns.betxn_test:Running test_betxt_7bit... INFO:tests.suites.betxns.betxn_test:Modrdn failed as expected: error Constraint violation INFO:tests.suites.betxns.betxn_test:test_betxt_7bit: PASSED
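The Constraint violation logged here comes from the 7-bit check betxn plugin rejecting an 8-bit RDN. A hedged sketch of the failing modrdn, with a hypothetical entry DN and placeholder credentials:

import ldap

conn = ldap.initialize('ldap://localhost:389')          # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials

try:
    # The new RDN contains a non-ASCII byte, which the 7-bit check plugin rejects
    conn.rename_s('uid=test_entry,dc=example,dc=com', 'uid=test_entry\xc3\xa9')
    assert False, 'modrdn should have been rejected'
except ldap.CONSTRAINT_VIOLATION:
    pass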
Passed suites/betxns/betxn_test.py::test_betxn_attr_uniqueness 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.betxns.betxn_test:Running test_betxn_attr_uniqueness... ERROR:tests.suites.betxns.betxn_test:test_betxn_attr_uniqueness: Failed to add test user as expected: uid=test_entry1,dc=example,dc=com, error Constraint violation INFO:tests.suites.betxns.betxn_test:test_betxn_attr_uniqueness: PASSED
Passed suites/betxns/betxn_test.py::test_betxn_memberof 0.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.betxns.betxn_test:test_betxn_memberof: Group2 was correctly rejected (mod replace): error Object class violation INFO:tests.suites.betxns.betxn_test:test_betxn_memberof: Group2 was correctly rejected (mod add): error Object class violation INFO:tests.suites.betxns.betxn_test:test_betxn_memberof: PASSED
Passed suites/clu/clu_test.py::test_clu_pwdhash 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.clu.clu_test:Running test_clu_pwdhash... INFO:tests.suites.clu.clu_test:pwdhash generated: {SSHA}xgZFzq+QHpehCv7tbnL+NWBx4iQk94PtmNjUCg== INFO:tests.suites.clu.clu_test:test_clu_pwdhash: PASSED
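The pwdhash run logged above can be reproduced from Python; the -s option selecting the SSHA scheme is an assumption about the CLI, and the cleartext password is a placeholder:

import subprocess

out = subprocess.check_output(['pwdhash', '-s', 'SSHA', 'Secret123'])
# pwdhash prints the hashed value, e.g. {SSHA}xgZF...
assert out.strip().startswith('{SSHA}')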
Passed suites/ds_logs/ds_logs_test.py::test_check_default 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.ds_logs.ds_logs_test:Check the default value of nsslapd-logging-hr-timestamps-enabled, it should be ON DEBUG:tests.suites.ds_logs.ds_logs_test:on
Passed suites/ds_logs/ds_logs_test.py::test_plugin_set_invalid 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.ds_logs.ds_logs_test:test_plugin_set_invalid - Expect to fail with junk value
Passed suites/ds_logs/ds_logs_test.py::test_log_plugin_on 2.18
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.ds_logs.ds_logs_test:Bug 1273549 - Check access logs for millisecond, when attribute is ON INFO:tests.suites.ds_logs.ds_logs_test:perform any ldap operation, which will trigger the logs INFO:tests.suites.ds_logs.ds_logs_test:Adding 100 users INFO:tests.suites.ds_logs.ds_logs_test:Restart the server to flush the logs INFO:tests.suites.ds_logs.ds_logs_test:parse the access logs
Passed suites/ds_logs/ds_logs_test.py::test_log_plugin_off 5.66
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.ds_logs.ds_logs_test:Bug 1273549 - Check access logs for missing millisecond, when attribute is OFF INFO:tests.suites.ds_logs.ds_logs_test:test_log_plugin_off - set the configuration attribute to OFF INFO:tests.suites.ds_logs.ds_logs_test:Restart the server to flush the logs INFO:tests.suites.ds_logs.ds_logs_test:test_log_plugin_off - delete the previous access logs INFO:tests.suites.ds_logs.ds_logs_test:Restart the server to flush the logs INFO:tests.suites.ds_logs.ds_logs_test:check access log that microseconds are not present
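Both log_plugin tests reduce to checking whether access-log timestamps carry a sub-second component after nsslapd-logging-hr-timestamps-enabled is toggled. A sketch of the parsing step; the access log path is the assumed default location for this instance:

import re

ACCESS_LOG = '/var/log/dirsrv/slapd-standalone_1/access'   # assumed default location

# High-resolution timestamps look like [15/Mar/2017:04:17:29.070741005 -0400]
hr_stamp = re.compile(r'\d{2}:\d{2}:\d{2}\.\d+')

with open(ACCESS_LOG) as f:
    has_subseconds = any(hr_stamp.search(line) for line in f)

# Expected True with the attribute ON, False after it is switched OFF
print(has_subseconds)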
Passed suites/filter/filter_test.py::test_filter_escaped 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.filter.filter_test:Running test_filter_escaped... INFO:tests.suites.filter.filter_test:test_filter_escaped: PASSED
Passed suites/filter/filter_test.py::test_filter_search_original_attrs 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.filter.filter_test:Running test_filter_search_original_attrs... INFO:tests.suites.filter.filter_test:test_filter_search_original_attrs: PASSED
Passed suites/filter/rfc3673_all_oper_attrs_test.py::test_supported_features 0.00
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[-False-oper_attr_list0] 0.00
No log output captured.
Passed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[-False-oper_attr_list0-*] 0.00
No log output captured.
Passed suites/filter/rfc3673_all_oper_attrs_test.py::test_search_basic[-False-oper_attr_list0-objectClass] 0.00
No log output captured.
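These rfc3673 cases exercise the '+' attribute from RFC 3673, which asks the server for all operational attributes. A minimal request looks like this (URL and bind are placeholders):

import ldap

conn = ldap.initialize('ldap://localhost:389')  # placeholder URL
conn.simple_bind_s('', '')                      # anonymous bind

# '+' requests every operational attribute; '*' adds the user attributes too
dn, attrs = conn.search_s('', ldap.SCOPE_BASE, '(objectclass=*)', ['*', '+'])[0]
assert 'namingContexts' in attrs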
Passed suites/memory_leaks/range_search_test.py::test_range_search 4.99
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ---------------------------- Captured stderr setup -----------------------------
INFO:tests.suites.memory_leaks.range_search_test:Initializing test_range_search... INFO:lib389.utils:Valgrind is now enabled. ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.memory_leaks.range_search_test:Running test_range_search...
Passed suites/password/password_test.py::test_password_delete_specific_password 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.password_test:Running test_password_delete_specific_password... INFO:tests.suites.password.password_test:test_password_delete_specific_password: PASSED
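test_password_delete_specific_password boils down to deleting one concrete userPassword value with a targeted MOD_DELETE rather than removing the whole attribute. A sketch with a hypothetical user entry and placeholder credentials:

import ldap

conn = ldap.initialize('ldap://localhost:389')          # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials

USER_DN = 'uid=test_user,ou=people,dc=example,dc=com'   # hypothetical entry

# Deleting with an explicit value removes only that password value
conn.modify_s(USER_DN, [(ldap.MOD_DELETE, 'userPassword', ['Secret123'])])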
Passed suites/password/pwdAdmin_test.py::test_pwdAdmin_config_validation 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdAdmin_test:test_pwdAdmin_config_validation: Failed as expected: Can't contact LDAP server INFO:tests.suites.password.pwdAdmin_test:test_pwdAdmin_config_validation: Failed as expected: Can't contact LDAP server
Passed suites/password/pwdPolicy_warning_test.py::test_different_values[ ] 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default value INFO:tests.suites.password.pwdPolicy_warning_test:An invalid value is being tested INFO:tests.suites.password.pwdPolicy_warning_test:Setting passwordSendExpiringTime to ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set passwordSendExpiringTime to error:Operations error INFO:tests.suites.password.pwdPolicy_warning_test:Now check the value is unchanged INFO:tests.suites.password.pwdPolicy_warning_test:Invalid value was rejected correctly
Passed suites/password/pwdPolicy_warning_test.py::test_different_values[junk123] 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default value INFO:tests.suites.password.pwdPolicy_warning_test:An invalid value is being tested INFO:tests.suites.password.pwdPolicy_warning_test:Setting passwordSendExpiringTime to junk123 ERROR:tests.suites.password.pwdPolicy_warning_test:Failed to set passwordSendExpiringTime to junk123 error:Operations error INFO:tests.suites.password.pwdPolicy_warning_test:Now check the value is unchanged INFO:tests.suites.password.pwdPolicy_warning_test:Invalid value junk123 was rejected correctly
Passed suites/password/pwdPolicy_warning_test.py::test_different_values[on] 1.03
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default value INFO:tests.suites.password.pwdPolicy_warning_test:A valid value is being tested INFO:tests.suites.password.pwdPolicy_warning_test:Setting passwordSendExpiringTime to on INFO:tests.suites.password.pwdPolicy_warning_test:Now check that the value has been changed INFO:tests.suites.password.pwdPolicy_warning_test:passwordSendExpiringTime is now set to on INFO:tests.suites.password.pwdPolicy_warning_test:Set passwordSendExpiringTime back to the default value INFO:tests.suites.password.pwdPolicy_warning_test:Setting passwordSendExpiringTime to off
Passed suites/password/pwdPolicy_warning_test.py::test_different_values[off] 1.03
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.password.pwdPolicy_warning_test:Get the default value INFO:tests.suites.password.pwdPolicy_warning_test:A valid value is being tested INFO:tests.suites.password.pwdPolicy_warning_test:Setting passwordSendExpiringTime to off INFO:tests.suites.password.pwdPolicy_warning_test:Now check that the value has been changed INFO:tests.suites.password.pwdPolicy_warning_test:passwordSendExpiringTime is now set to off INFO:tests.suites.password.pwdPolicy_warning_test:Set passwordSendExpiringTime back to the default value INFO:tests.suites.password.pwdPolicy_warning_test:Setting passwordSendExpiringTime to off
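Each test_different_values case sets passwordSendExpiringTime on cn=config and checks whether the server accepts it; the log above shows invalid values bounced with an Operations error while 'on' and 'off' are stored. A sketch of one invalid and one valid round trip (URL and credentials are placeholders):

import ldap

conn = ldap.initialize('ldap://localhost:389')          # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials

# Invalid value: the server refuses it and the old value stays in place
try:
    conn.modify_s('cn=config', [(ldap.MOD_REPLACE, 'passwordSendExpiringTime', ['junk123'])])
    assert False, 'junk value should have been rejected'
except ldap.LDAPError:
    pass

# Valid value: accepted and readable back from cn=config
conn.modify_s('cn=config', [(ldap.MOD_REPLACE, 'passwordSendExpiringTime', ['on'])])
dn, attrs = conn.search_s('cn=config', ldap.SCOPE_BASE, '(objectclass=*)',
                          ['passwordSendExpiringTime'])[0]
assert attrs['passwordSendExpiringTime'] == ['on']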
Passed suites/plugins/attr_uniqueness_test.py::test_attr_uniqueness_init 0.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed suites/plugins/attr_uniqueness_test.py::test_attr_uniqueness 0.04
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.attr_uniqueness_test:Running test_attr_uniqueness... INFO:tests.suites.plugins.attr_uniqueness_test:test_attr_uniqueness: PASS
Passed suites/plugins/dna_test.py::test_basic 2.24
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.dna_test:Testing Distributed Numeric Assignment Plugin...
Passed suites/plugins/memberof_test.py::test_memberof_setloging 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_001 0.01
No log output captured.
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_003 2.40
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Enable MemberOf plugin
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_004 0.06
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp1,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh2,ou=people,dc=example,dc=com is memberof cn=group_memofegrp1,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp2,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh2,ou=people,dc=example,dc=com is memberof cn=group_memofegrp2,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
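The pattern repeated through these memberof_test cases is: add the user's DN to a group's member (or uniqueMember) attribute, then read the memberOf operational attribute back from the user. A condensed sketch using the DNs from the log, with placeholder URL and credentials:

import ldap

conn = ldap.initialize('ldap://localhost:389')          # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials

USER = 'uid=user_memofenh1,ou=people,dc=example,dc=com'
GROUP = 'cn=group_memofegrp1,ou=groups,dc=example,dc=com'

# Make the user a member of the group; the MemberOf plugin then updates the user
conn.modify_s(GROUP, [(ldap.MOD_ADD, 'member', [USER])])

# Look the membership up from the user side, case-insensitively
dn, attrs = conn.search_s(USER, ldap.SCOPE_BASE, '(objectclass=*)', ['memberof'])[0]
memberof = [v for k, vals in attrs.items() if k.lower() == 'memberof' for v in vals]
assert any(v.lower() == GROUP.lower() for v in memberof)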
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_005 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is no longer memberof cn=group_memofegrp1,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_006 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is no longer memberof cn=group_memofegrp1,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_007 0.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh2,ou=people,dc=example,dc=com is no longer memberof cn=group_memofegrp1,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is no longer memberof cn=group_memofegrp2,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_008 4.73
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp1,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh2,ou=people,dc=example,dc=com is memberof cn=group_memofegrp2,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Remove uniqueMember as a memberofgrpattr INFO:tests.suites.plugins.memberof_test:Assert that this change of configuration did change the already set values INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_009 0.00
----------------------------- Captured stderr call -----------------------------
ERROR:tests.suites.plugins.memberof_test:Setting 'memberUid' as memberofgroupattr is rejected (expected)
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_010 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Try uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp1,ou=groups,dc=example,dc=com (member) ERROR:tests.suites.plugins.memberof_test:uid=user_memofenh1,ou=people,dc=example,dc=com already member of cn=group_memofegrp1,ou=groups,dc=example,dc=com --> fail (expected)
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_011 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Check initial status INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Try uid=user_memofenh2,ou=people,dc=example,dc=com is memberof cn=group_memofegrp2,ou=groups,dc=example,dc=com (member) ERROR:tests.suites.plugins.memberof_test:uid=user_memofenh2,ou=people,dc=example,dc=com already member of cn=group_memofegrp2,ou=groups,dc=example,dc=com --> fail (expected) INFO:tests.suites.plugins.memberof_test:Check final status INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_012 0.00
No log output captured.
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_013 0.00
No log output captured.
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_014 0.05
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Check initial status INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp3,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp3,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is not memberof cn=group_memofegrp3,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh2,ou=people,dc=example,dc=com is memberof cn=group_memofegrp3,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Checking final status INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_015 0.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Checking Initial status INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp015,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Update uid=user_dummy1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp015,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_dummy2,ou=people,dc=example,dc=com is memberof cn=group_memofegrp015,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com
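The captured log above boils down to two repeated operations: read the memberOf attribute of a user entry and compare it with the expected group DNs, then record "--> membership verified". As a rough sketch only (not the lib389 helpers the test suite actually calls), the lookup side could be done with plain python-ldap as below; the server URI and bind credentials are placeholders.

import ldap

def get_memberof(conn, entry_dn):
    # Read just the memberOf attribute of a single entry (base-scope search).
    result = conn.search_s(entry_dn, ldap.SCOPE_BASE, '(objectClass=*)', ['memberOf'])
    _dn, attrs = result[0]
    values = attrs.get('memberOf', attrs.get('memberof', []))
    return [v.decode('utf-8') if isinstance(v, bytes) else v for v in values]

conn = ldap.initialize('ldap://localhost:389')          # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials
for group in get_memberof(conn, 'uid=user_memofenh1,ou=people,dc=example,dc=com'):
    print('memberof: %s' % group)
conn.unbind_s()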
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_016 0.04
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp016,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp016,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofenh1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp016,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Update uid=user_dummy1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp016,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_dummy1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp016,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com
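test_memberof_MultiGrpAttr_016 adds the same user DN to cn=group_memofegrp016 under both membership attributes (member and uniqueMember) and then re-checks memberOf on the affected users. A minimal sketch of the modify side with python-ldap, again with placeholder connection details rather than the suite's own helpers:

import ldap

GROUP_DN = 'cn=group_memofegrp016,ou=groups,dc=example,dc=com'
USER_DN = 'uid=user_memofenh1,ou=people,dc=example,dc=com'

conn = ldap.initialize('ldap://localhost:389')          # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials

# Add the user DN under both membership attributes, as the log describes.
conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'member', [USER_DN.encode('utf-8')])])
conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniqueMember', [USER_DN.encode('utf-8')])])

# The memberOf plugin is expected to add cn=group_memofegrp016,... to the
# user's memberOf values, which the follow-up lookups in the log verify.
conn.unbind_s()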
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_017 0.06
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user 
uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp017,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp017,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser2,ou=people,dc=example,dc=com is memberof cn=group_memofegrp017,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser3,ou=people,dc=example,dc=com is memberof cn=group_memofegrp017,ou=groups,dc=example,dc=com (memberuid) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com
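In test_memberof_MultiGrpAttr_017 three fresh users are attached to cn=group_memofegrp017 through three different attributes (member, uniqueMember, memberuid). The log shows memberOf being populated for the first two users, while the lookups for user_memofuser3, added only through memberuid (not a DN-valued membership attribute here), return no memberOf values. Each "--> membership verified" line corresponds to an exact comparison of the memberOf values against the expected group DNs; a hedged sketch of such a check with python-ldap and placeholder connection details:

import ldap

def assert_memberof(conn, entry_dn, expected_groups):
    # Fail unless entry_dn's memberOf values match expected_groups exactly
    # (case-insensitive, since DNs compare case-insensitively).
    res = conn.search_s(entry_dn, ldap.SCOPE_BASE, '(objectClass=*)', ['memberOf'])
    _dn, attrs = res[0]
    raw = attrs.get('memberOf', attrs.get('memberof', []))
    found = set(v.decode('utf-8').lower() if isinstance(v, bytes) else v.lower()
                for v in raw)
    expected = set(g.lower() for g in expected_groups)
    assert found == expected, 'got %r, expected %r' % (sorted(found), sorted(expected))

conn = ldap.initialize('ldap://localhost:389')          # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials
assert_memberof(conn, 'uid=user_memofuser1,ou=people,dc=example,dc=com',
                ['cn=group_memofegrp017,ou=Groups,dc=example,dc=com'])
assert_memberof(conn, 'uid=user_memofuser3,ou=people,dc=example,dc=com', [])
conn.unbind_s()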
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_018 0.09
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from 
uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp018,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp017,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp017,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser1,ou=people,dc=example,dc=com is memberof cn=group_memofegrp017,ou=groups,dc=example,dc=com (memberuid) INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp018,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp018,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp018,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp018,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp018,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp018,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser1,ou=people,dc=example,dc=com is no longer memberof cn=group_memofegrp018,ou=groups,dc=example,dc=com (member) INFO:tests.suites.plugins.memberof_test:Update uid=user_memofuser1,ou=people,dc=example,dc=com is no longer memberof cn=group_memofegrp018,ou=groups,dc=example,dc=com (uniqueMember) INFO:tests.suites.plugins.memberof_test:Lookup memberof from 
uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp017,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com
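test_memberof_MultiGrpAttr_018 ends with user_memofuser1 being removed from cn=group_memofegrp018 under both member and uniqueMember; the later lookups show the user's memberOf shrinking back to cn=group_memofegrp017 only. The removal side, sketched with python-ldap under the same placeholder assumptions:

import ldap

GROUP_DN = 'cn=group_memofegrp018,ou=groups,dc=example,dc=com'
USER_DN = 'uid=user_memofuser1,ou=people,dc=example,dc=com'

conn = ldap.initialize('ldap://localhost:389')          # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials

# Remove the user from both DN-valued membership attributes; the memberOf
# plugin should then drop cn=group_memofegrp018,... from the user's memberOf,
# leaving only cn=group_memofegrp017,... as the later lookups in the log show.
conn.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', [USER_DN.encode('utf-8')])])
conn.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniqueMember', [USER_DN.encode('utf-8')])])
conn.unbind_s()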
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_019 0.15
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp019_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from 
cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp019_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp019_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com
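The captured log above repeats one pattern: look up the memberOf attribute of a user entry, print each value, and then log "--> membership verified". This is not the suite's actual helper (the tests use lib389 on Python 2.7); the following is only a minimal sketch of such a check with plain python-ldap, using placeholder connection details and DNs taken from the log:

import ldap

def verify_memberof(conn, user_dn, expected_groups):
    # Read the memberOf attribute of the entry itself (base-scope search).
    dn, attrs = conn.search_s(user_dn, ldap.SCOPE_BASE,
                              '(objectClass=*)', ['memberOf'])[0]
    values = [v.decode('utf-8') if isinstance(v, bytes) else v
              for v in attrs.get('memberOf', [])]
    for value in values:
        print('memberof: %s' % value)          # mirrors the log lines above
    # Compare DNs case-insensitively; the log mixes ou=Groups and ou=groups.
    return set(v.lower() for v in values) == set(g.lower() for g in expected_groups)

conn = ldap.initialize('ldap://localhost:389')           # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')   # placeholder credentials
assert verify_memberof(conn,
                       'uid=user_memofuser2,ou=people,dc=example,dc=com',
                       ['cn=group_memofegrp019_1,ou=Groups,dc=example,dc=com',
                        'cn=group_memofegrp019_2,ou=Groups,dc=example,dc=com'])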
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_020 0.21
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
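The "Create group cn=group_memofegrp020_*" lines and the later member/uniqueMember checks exercise the memberOf plugin on groups that carry both membership attributes. A minimal sketch of creating one such group with plain python-ldap, reusing the conn from the sketch above; the objectClass combination (groupOfNames plus the extensibleObject auxiliary class, purely so the entry can hold both member and uniqueMember) and the plain-string values (Python 2.7 / python-ldap 2.x, as in this report's environment) are assumptions, not the test's actual code:

import ldap
import ldap.modlist

group_dn = 'cn=group_memofegrp020_5,ou=groups,dc=example,dc=com'
entry = {
    'objectClass': ['top', 'groupOfNames', 'extensibleObject'],
    'cn': ['group_memofegrp020_5'],
    'member': ['uid=user_memofuser1,ou=people,dc=example,dc=com'],
    'uniqueMember': ['uid=user_memofuser1,ou=people,dc=example,dc=com'],
}
conn.add_s(group_dn, ldap.modlist.addModlist(entry))

# Adding a nested group as a further member is what makes the memberOf plugin
# propagate memberOf values, as seen in the lookups logged above.
conn.modify_s(group_dn,
              [(ldap.MOD_ADD, 'member',
                ['cn=group_memofegrp020_1,ou=groups,dc=example,dc=com'])])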
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_021 0.18
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create user uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Create group cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from 
uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com
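The "Check <group>.member = <dn>" and "Check <group>.uniqueMember = <dn>" lines above read each group back and confirm that the expected membership value is present. A minimal sketch of that assertion with plain python-ldap, again reusing conn from the earlier sketch; the case-insensitive DN comparison is an assumption:

import ldap

def assert_group_value(conn, group_dn, attr, expected_dn):
    # Read only the membership attribute being checked.
    dn, attrs = conn.search_s(group_dn, ldap.SCOPE_BASE,
                              '(objectClass=*)', [attr])[0]
    values = [v.decode('utf-8') if isinstance(v, bytes) else v
              for v in attrs.get(attr, [])]
    for value in values:
        print('%s: %s' % (attr, value))        # mirrors the log lines above
    assert expected_dn.lower() in [v.lower() for v in values]

assert_group_value(conn,
                   'cn=group_memofegrp020_5,ou=groups,dc=example,dc=com',
                   'member',
                   'uid=user_memofuser1,ou=people,dc=example,dc=com')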
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_022 5.34
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = 
uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from 
uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com
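Note: the captured log above records repeated "Lookup memberof from <dn>" checks, where the test reads the memberOf attribute of a user or group entry and compares it against the expected set of groups before logging "--> membership verified". The fixture and helper code behind these messages are not included in this report; the snippet below is only a minimal sketch of such a verification step, written against python-ldap. The host, port, bind credentials, and the verify_memberof helper name are assumptions for illustration and are not taken from the test suite.

# Minimal sketch (not the suite's code): read memberOf from an entry and
# compare it with an expected set of group DNs, mirroring the
# "Lookup memberof from <dn>" / "--> membership verified" log lines above.
import ldap


def verify_memberof(conn, entry_dn, expected_group_dns):
    """Return True if entry_dn's memberOf values match expected_group_dns."""
    result = conn.search_s(entry_dn, ldap.SCOPE_BASE,
                           '(objectClass=*)', ['memberOf'])
    _dn, attrs = result[0]
    # Attribute name case can vary by server, so look it up case-insensitively.
    values = []
    for key, vals in attrs.items():
        if key.lower() == 'memberof':
            values = vals
            break
    # python-ldap returns bytes under Python 3 and str under Python 2.
    found = {v.decode('utf-8') if isinstance(v, bytes) else v for v in values}
    # DNs are case-insensitive; normalize before comparing.
    return {dn.lower() for dn in found} == {dn.lower() for dn in expected_group_dns}


if __name__ == '__main__':
    # Hypothetical standalone instance and credentials, for illustration only.
    conn = ldap.initialize('ldap://localhost:389')
    conn.simple_bind_s('cn=Directory Manager', 'password')
    ok = verify_memberof(
        conn,
        'uid=user_memofuser1,ou=people,dc=example,dc=com',
        ['cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com',
         'cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com'])
    print('--> membership verified' if ok else 'membership mismatch')
    conn.unbind_s()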
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_023 0.11
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from 
uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from 
uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp016,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofenh2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: 
uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member 
= cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = 
uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
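The captured log above repeatedly performs a "Lookup memberof from <entry> ... --> membership verified" step. A minimal sketch of how such a memberOf verification could be written with python-ldap is shown below; the connection handling, helper name, and example DNs are illustrative assumptions, not the suite's actual helpers.

# Sketch only: verify that an entry carries the expected memberOf values,
# mirroring the "Lookup memberof ... --> membership verified" log lines.
import ldap

def verify_memberof(conn, entry_dn, expected_group_dns):
    """Read the memberOf values of entry_dn and assert that every
    expected group DN is present (case-insensitive DN comparison)."""
    result = conn.search_s(entry_dn, ldap.SCOPE_BASE,
                           '(objectClass=*)', ['memberOf'])
    _dn, attrs = result[0]
    found = set(v.lower() for v in attrs.get('memberOf', []))
    for group_dn in expected_group_dns:
        assert group_dn.lower() in found, \
            "%s is missing memberOf: %s" % (entry_dn, group_dn)

# Example usage (assumed bind credentials), matching entries from the log:
# conn = ldap.initialize('ldap://localhost:389')
# conn.simple_bind_s('cn=Directory Manager', 'password')
# verify_memberof(conn,
#                 'uid=user_memofuser1,ou=people,dc=example,dc=com',
#                 ['cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com',
#                  'cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com'])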
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_024 0.05
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: 
uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: 
cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified 
INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified
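The "Check <group>.member = <dn>" and "Check <group>.uniqueMember = <dn>" lines in the log above verify the forward membership attributes on the group entries themselves, before the memberOf lookups. A hedged sketch of that check, again using an assumed python-ldap connection rather than the test suite's own helpers:

# Sketch only: confirm a group entry lists a given DN in member/uniqueMember,
# mirroring the "Check cn=group_... .member = ..." log lines.
import ldap

def group_has_value(conn, group_dn, attr, expected_dn):
    """Return True if the group entry lists expected_dn in attr
    ('member' or 'uniqueMember')."""
    result = conn.search_s(group_dn, ldap.SCOPE_BASE,
                           '(objectClass=*)', [attr])
    _dn, attrs = result[0]
    values = set(v.lower() for v in attrs.get(attr, []))
    return expected_dn.lower() in values

# e.g.
# assert group_has_value(conn,
#     'cn=group_memofegrp020_5,ou=groups,dc=example,dc=com',
#     'member',
#     'cn=group_memofegrp020_1,ou=groups,dc=example,dc=com')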
Passed suites/plugins/memberof_test.py::test_memberof_MultiGrpAttr_025 0.15
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: 
uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:uniqueMember: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com 
INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_2,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_3,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_4,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_1,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check 
cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:member: cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_5,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = cn=group_memofegrp020_5,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.uniqueMember = 
uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_1,ou=groups,dc=example,dc=com.member = uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_2,ou=groups,dc=example,dc=com.member = uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_3,ou=groups,dc=example,dc=com.member = uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.uniqueMember = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Check cn=group_memofegrp020_4,ou=groups,dc=example,dc=com.member = uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_1,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_2,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_3,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from cn=group_memofegrp020_4,ou=groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:--> membership verified INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: 
cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser1,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:memberof: cn=group_memofegrp020_5,ou=Groups,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser2,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser3,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com INFO:tests.suites.plugins.memberof_test:Lookup memberof from uid=user_memofuser4,ou=people,dc=example,dc=com
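The member/uniqueMember checks and the memberOf lookups traced above can be reproduced with a few python-ldap calls. A minimal sketch, assuming a directory on localhost:389, the cn=Directory Manager password "password", and one group/user pair named in the log (the connection details are illustrative, not the test's own fixtures):

import ldap

SUFFIX = "dc=example,dc=com"
GROUP = "cn=group_memofegrp020_5,ou=groups," + SUFFIX    # group from the log above
USER = "uid=user_memofuser1,ou=people," + SUFFIX         # user from the log above

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

def lower_keys(entry):
    # Attribute names are case-insensitive; normalize before looking them up.
    return dict((k.lower(), v) for k, v in entry.items())

# Forward check: the user shows up in the group's member/uniqueMember values.
group = lower_keys(conn.search_s(GROUP, ldap.SCOPE_BASE, "(objectClass=*)",
                                 ["member", "uniqueMember"])[0][1])
forward = [v.lower() for v in group.get("member", []) + group.get("uniquemember", [])]
assert USER.lower() in forward

# Reverse check: the memberOf plugin wrote the group back into the user entry.
user = lower_keys(conn.search_s(USER, ldap.SCOPE_BASE, "(objectClass=*)",
                                ["memberOf"])[0][1])
reverse = [v.lower() for v in user.get("memberof", [])]
assert GROUP.lower() in reverse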
Passed suites/plugins/memberof_test.py::test_memberof_auto_add_oc 0.11
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.memberof_test:Correctly rejected invalid objectclass INFO:tests.suites.plugins.memberof_test:Test complete.
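A hedged sketch of the negative check logged here: try to point the MemberOf plugin's memberOfAutoAddOC setting at an objectclass that does not exist and confirm the server refuses the modify. The plugin DN and attribute are the standard 389-ds names; the host, port, credentials and the bogus value are assumptions, and the sketch only checks that some LDAP error is returned.

import ldap

MEMBEROF_PLUGIN = "cn=MemberOf Plugin,cn=plugins,cn=config"

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

try:
    conn.modify_s(MEMBEROF_PLUGIN,
                  [(ldap.MOD_REPLACE, "memberOfAutoAddOC", b"notAnObjectclass")])
    raise AssertionError("invalid objectclass was unexpectedly accepted")
except ldap.LDAPError:
    pass  # rejected, which is what the log above reports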
Passed suites/plugins/rootdn_plugin_test.py::test_rootdn_init 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Initializing root DN test suite... INFO:tests.suites.plugins.rootdn_plugin_test:test_rootdn_init: Initialized root DN test suite.
Passed suites/plugins/rootdn_plugin_test.py::test_rootdn_config_validate 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.suites.plugins.rootdn_plugin_test:Running test_rootdn_config_validate... INFO:tests.suites.plugins.rootdn_plugin_test:test_rootdn_config_validate: PASSED
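The config-validation step can be approximated by writing an out-of-range value into one of the Root DN Access Control plugin's rootdn-* attributes and expecting the modify to fail. The plugin DN and the rootdn-open-time attribute are the standard names; the connection details and the invalid value are assumptions, and because the exact validation rules belong to the plugin, the sketch only asserts that an LDAP error comes back.

import ldap

ROOTDN_PLUGIN = "cn=RootDN Access Control,cn=plugins,cn=config"

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

# rootdn-open-time expects an HHMM value; "2500" should not pass validation.
try:
    conn.modify_s(ROOTDN_PLUGIN, [(ldap.MOD_REPLACE, "rootdn-open-time", b"2500")])
    raise AssertionError("invalid rootdn-open-time was unexpectedly accepted")
except ldap.LDAPError:
    pass  # rejected, as test_rootdn_config_validate expects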
Passed suites/replication/single_master_test.py::test_lastupdate_attr_before_init 0.05
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}DAH7PgKFpCW2l7Hc/Y+VuVTQQSSNNt2DZ+RTvaMLFPiFvALHz2a3IfGIb9KCYN2hxHWdM7yuyR0k+MdySjJMYnvRpYN/IutT INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}sKYPoQDu95UumvEyNxWXDxsxFGb2JUAudKvFjfLJZgt9z77XI6K69LQfNgudy0UH+asQGcf/E7RWBJDQMXTj/eljm0YQuRC8
----------------------------- Captured stderr call -----------------------------
CRITICAL:lib389:testReplication() failed to modify (dc=example,dc=com), error ({'desc': "Can't contact LDAP server"})
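What this test inspects, the nsds5replicaLastUpdate* attributes of a replication agreement before the consumer has ever been initialized, can be read back with a plain subtree search under cn=config. A sketch under assumed connection details; the attribute names are the standard agreement attributes, and the sketch only prints them rather than asserting the exact pre-initialization defaults.

import ldap

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

agreements = conn.search_s(
    "cn=config", ldap.SCOPE_SUBTREE,
    "(objectClass=nsds5replicationagreement)",
    ["nsds5replicaLastUpdateStart", "nsds5replicaLastUpdateEnd",
     "nsds5replicaLastUpdateStatus"])

for dn, attrs in agreements:
    # Before an init these still hold their pre-initialization defaults.
    print("%s %r" % (dn, attrs))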
Passed suites/replication/tombstone_test.py::test_purge_success 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389.utils:Setting up replication... INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}5FJYUdeVRW5DPUTQLBweuMxvdenQJpP0We7+v8Vm7+AhGCTK6Dp+7HgIWS6zyVWAA14lmYK1uBuAoNC+7027yJOrltmmyaSn INFO:lib389.utils:Add and then delete an entry to create a tombstone... INFO:lib389.utils:Search for tombstone entries...
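The tombstone round trip in this log (add an entry, delete it, then find its tombstone) only needs an explicit nsTombstone filter, because tombstones are not returned by ordinary searches. A sketch assuming replication is already enabled on the suffix, as the test's setup arranges, and with illustrative connection details and entry names:

import ldap
import ldap.modlist

SUFFIX = "dc=example,dc=com"
ENTRY = "uid=tombstone_candidate," + SUFFIX              # illustrative entry

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

# Add and then delete an entry so the replica keeps a tombstone for it.
conn.add_s(ENTRY, ldap.modlist.addModlist({
    "objectClass": [b"top", b"person", b"inetOrgPerson"],
    "uid": [b"tombstone_candidate"],
    "cn": [b"tombstone candidate"],
    "sn": [b"candidate"],
}))
conn.delete_s(ENTRY)

# Tombstones only show up when the filter names nsTombstone explicitly.
tombstones = conn.search_s(SUFFIX, ldap.SCOPE_SUBTREE,
                           "(&(objectClass=nsTombstone)(uid=tombstone_candidate))")
print(len(tombstones))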
Passed tickets/ticket365_test.py::test_ticket365 4.05
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket365_test:Test complete INFO:tests.tickets.ticket365_test:Test complete
Passed tickets/ticket47313_test.py::test_ticket47313_run 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389:Bind as cn=Directory Manager INFO:lib389: ######################### ADD ###################### INFO:lib389:Try to add Add cn=test_entry both, dc=example,dc=com: dn: cn=test_entry both, dc=example,dc=com cn: test_entry both cn;en: test_entry en cn;fr: test_entry fr objectclass: top objectclass: person sn: test_entry both INFO:lib389:Try to add Add cn=test_entry en only, dc=example,dc=com: dn: cn=test_entry en only, dc=example,dc=com cn: test_entry en only cn;en: test_entry en objectclass: top objectclass: person sn: test_entry en only INFO:lib389: ######################### SEARCH ###################### INFO:lib389:Try to search with filter (&(sn=test_entry en only)(!(cn=test_entry fr))) INFO:lib389:Found cn=test_entry en only,dc=example,dc=com INFO:lib389:Try to search with filter (&(sn=test_entry en only)(!(cn;fr=test_entry fr))) INFO:lib389:Found cn=test_entry en only,dc=example,dc=com INFO:lib389:Try to search with filter (&(sn=test_entry en only)(!(cn;en=test_entry en))) INFO:lib389:Found none INFO:lib389: ######################### DELETE ###################### INFO:lib389:Try to delete cn=test_entry both, dc=example,dc=com INFO:lib389:Try to delete cn=test_entry en only, dc=example,dc=com INFO:tests.tickets.ticket47313_test:Testcase PASSED
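The log above exercises attribute subtypes (cn;en, cn;fr) in both the entry and the search filter. That flow translates directly to python-ldap; the connection details below are assumptions, and the entry mirrors the "en only" entry from the log.

import ldap
import ldap.modlist

SUFFIX = "dc=example,dc=com"
DN = "cn=test_entry en only," + SUFFIX

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

conn.add_s(DN, ldap.modlist.addModlist({
    "objectClass": [b"top", b"person"],
    "cn": [b"test_entry en only"],
    "cn;en": [b"test_entry en"],                          # language subtype
    "sn": [b"test_entry en only"],
}))

# As in the log: negating cn;fr still matches the entry, negating cn;en does not,
# so the second search returns nothing.
print(conn.search_s(SUFFIX, ldap.SCOPE_SUBTREE,
                    "(&(sn=test_entry en only)(!(cn;fr=test_entry fr)))"))
print(conn.search_s(SUFFIX, ldap.SCOPE_SUBTREE,
                    "(&(sn=test_entry en only)(!(cn;en=test_entry en)))"))

conn.delete_s(DN)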
Passed tickets/ticket47384_test.py::test_ticket47384 0.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
WARNING:tests.tickets.ticket47384_test:Failed to copy /usr/lib64/dirsrv/plugins/libwhoami-plugin.la to the tmp directory, error: No such file or directory INFO:tests.tickets.ticket47384_test:Test complete
Passed tickets/ticket47431_test.py::test_ticket47431_0 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47431_test:Ticket 47431 - 0: Enable 7bit plugin...
Passed tickets/ticket47431_test.py::test_ticket47431_1 1.20
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47431_test:Ticket 47431 - 1: Check 26 duplicate values are treated as one... DEBUG:tests.tickets.ticket47431_test:modify_s cn=7-bit check,cn=plugins,cn=config DEBUG:tests.tickets.ticket47431_test:line: [15/Mar/2017:04:23:10.594287568 -0400] - WARN - str2entry_dupcheck - 26 duplicate values for attribute type nsslapd-pluginarg2 detected in entry cn=7-bit check,cn=plugins,cn=config. Extra values ignored. INFO:tests.tickets.ticket47431_test:Expected error "str2entry_dupcheck.* duplicate values for attribute type nsslapd-pluginarg2 detected in entry cn=7-bit check,cn=plugins,cn=config." logged in /var/log/dirsrv/slapd-standalone_1/errors INFO:tests.tickets.ticket47431_test:Ticket 47431 - 1: done
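The pattern match described here, confirming that str2entry_dupcheck reported the duplicate nsslapd-pluginarg2 values in the instance's errors log, is a few lines of stock Python. The log path is the one quoted above and therefore specific to this instance name; treat it as an assumption elsewhere.

import re

ERRORS_LOG = "/var/log/dirsrv/slapd-standalone_1/errors"   # path from the log above
PATTERN = re.compile(r"str2entry_dupcheck.*duplicate values for attribute type "
                     r"nsslapd-pluginarg2 detected in entry "
                     r"cn=7-bit check,cn=plugins,cn=config")

with open(ERRORS_LOG) as f:
    matched = any(PATTERN.search(line) for line in f)
print("warning found: %s" % matched)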
Passed tickets/ticket47431_test.py::test_ticket47431_2 2.40
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47431_test:Ticket 47431 - 2: Check two values belonging to one arg is fixed... DEBUG:tests.tickets.ticket47431_test:line - [15/Mar/2017:04:23:12.807948673 -0400] - DEBUG - NS7bitAttr - NS7bitAttr_Init - 0: uid DEBUG:tests.tickets.ticket47431_test:ATTRS[0] uid DEBUG:tests.tickets.ticket47431_test:uid was logged DEBUG:tests.tickets.ticket47431_test:line - [15/Mar/2017:04:23:12.808893095 -0400] - DEBUG - NS7bitAttr - NS7bitAttr_Init - 1: mail DEBUG:tests.tickets.ticket47431_test:ATTRS[1] mail DEBUG:tests.tickets.ticket47431_test:mail was logged DEBUG:tests.tickets.ticket47431_test:line - [15/Mar/2017:04:23:12.809598068 -0400] - DEBUG - NS7bitAttr - NS7bitAttr_Init - 2: userpassword DEBUG:tests.tickets.ticket47431_test:ATTRS[2] userpassword DEBUG:tests.tickets.ticket47431_test:userpassword was logged DEBUG:tests.tickets.ticket47431_test:line - [15/Mar/2017:04:23:12.810234408 -0400] - DEBUG - NS7bitAttr - NS7bitAttr_Init - 3: , DEBUG:tests.tickets.ticket47431_test:ATTRS[3] , DEBUG:tests.tickets.ticket47431_test:, was logged DEBUG:tests.tickets.ticket47431_test:line - [15/Mar/2017:04:23:12.810849100 -0400] - DEBUG - NS7bitAttr - NS7bitAttr_Init - 4: dc=example,dc=com DEBUG:tests.tickets.ticket47431_test:ATTRS[4] dc=example,dc=com DEBUG:tests.tickets.ticket47431_test:dc=example,dc=com was logged INFO:tests.tickets.ticket47431_test:Ticket 47431 - 2: done
Passed tickets/ticket47431_test.py::test_ticket47431_3 1.42
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47431_test:Ticket 47431 - 3: Check missing args are fixed... DEBUG:tests.tickets.ticket47431_test:uid was logged DEBUG:tests.tickets.ticket47431_test:mail was logged DEBUG:tests.tickets.ticket47431_test:userpassword was logged DEBUG:tests.tickets.ticket47431_test:, was logged DEBUG:tests.tickets.ticket47431_test:dc=example,dc=com was logged INFO:tests.tickets.ticket47431_test:Ticket 47431 - 3: done INFO:tests.tickets.ticket47431_test:Test complete
Passed tickets/ticket47560_test.py::test_ticket47560 12.43
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
DEBUG:tests.tickets.ticket47560_test:-------- > _test_ticket47560_setup DEBUG:tests.tickets.ticket47560_test:-------------> _enable_disable_mbo(on) DEBUG:tests.tickets.ticket47560_test:-------- > Start ticket tests DEBUG:tests.tickets.ticket47560_test:Unfixed entry dn: uid=member,dc=example,dc=com cn: member memberOf: cn=group,dc=example,dc=com objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetorgperson objectClass: inetUser sn: member uid: member INFO:lib389:fixupMemberOf task fixupmemberof_03152017_042411 for basedn dc=example,dc=com completed successfully DEBUG:tests.tickets.ticket47560_test:Fixed entry dn: uid=member,dc=example,dc=com cn: member objectClass: top objectClass: person objectClass: organizationalPerson objectClass: inetorgperson objectClass: inetUser sn: member uid: member DEBUG:tests.tickets.ticket47560_test:-------- > _test_ticket47560_teardown DEBUG:tests.tickets.ticket47560_test:-------------> _enable_disable_mbo(off) INFO:tests.tickets.ticket47560_test:Testcase PASSED
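The fixupMemberOf run reported above is driven by adding a task entry under cn=memberOf task,cn=tasks,cn=config and letting the server record an exit code on it. A sketch with the task cn, the filter, the polling loop and the connection parameters as assumptions:

import ldap
import ldap.modlist
import time

TASK_DN = "cn=example_fixup,cn=memberOf task,cn=tasks,cn=config"  # illustrative cn

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

conn.add_s(TASK_DN, ldap.modlist.addModlist({
    "objectClass": [b"top", b"extensibleObject"],
    "cn": [b"example_fixup"],
    "basedn": [b"dc=example,dc=com"],                     # subtree to fix up
    "filter": [b"(objectClass=inetUser)"],                # optional member filter
}))

# Poll the task entry until the server records an exit code for it.
while True:
    entry = conn.search_s(TASK_DN, ldap.SCOPE_BASE, "(objectClass=*)")[0][1]
    codes = [v for k, v in entry.items() if k.lower() == "nstaskexitcode"]
    if codes:
        print("fixup finished, exit code %s" % codes[0][0])
        break
    time.sleep(1)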
Passed tickets/ticket47640_test.py::test_ticket47640 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47640_test:Add operation correctly rejected. INFO:tests.tickets.ticket47640_test:Test complete
Passed tickets/ticket47653_test.py::test_ticket47653_init 0.15
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389:Add OCticket47653 that allows 'member' attribute INFO:lib389:Add cn=bind_entry, dc=example,dc=com
Passed tickets/ticket47669_test.py::test_ticket47669_init 2.25
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47669_test:Testing Ticket 47669 - Test duration syntax in the changelogs INFO:lib389:Bind as cn=Directory Manager
Passed tickets/ticket47669_test.py::test_ticket47669_changelog_maxage 0.03
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47669_test:1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config INFO:lib389:Bind as cn=Directory Manager INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 12345 -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 10s -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 30M -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 12h -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 2D -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 4w -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: -123 -- invalid ERROR:tests.tickets.ticket47669_test:Expectedly failed to add nsslapd-changelogmaxage: -123 to cn=changelog5,cn=config: error Server is unwilling to perform INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: xyz -- invalid ERROR:tests.tickets.ticket47669_test:Expectedly failed to add nsslapd-changelogmaxage: xyz to cn=changelog5,cn=config: error Server is unwilling to perform
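The valid/invalid matrix in this log can be driven by writing each candidate duration into nsslapd-changelogmaxage on cn=changelog5,cn=config and expecting ldap.UNWILLING_TO_PERFORM ("Server is unwilling to perform") for the malformed ones. The entry and attribute names are the ones quoted in the log; the sketch assumes the changelog is already configured and uses illustrative connection details.

import ldap

CHANGELOG = "cn=changelog5,cn=config"

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

valid = [b"12345", b"10s", b"30M", b"12h", b"2D", b"4w"]
invalid = [b"-123", b"xyz"]

for value in valid:
    conn.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, "nsslapd-changelogmaxage", value)])

for value in invalid:
    try:
        conn.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, "nsslapd-changelogmaxage", value)])
        raise AssertionError("%r was unexpectedly accepted" % value)
    except ldap.UNWILLING_TO_PERFORM:
        pass  # rejected, matching the "Server is unwilling to perform" errors above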
Passed tickets/ticket47669_test.py::test_ticket47669_changelog_triminterval 0.03
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47669_test:2. Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config INFO:lib389:Bind as cn=Directory Manager INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: 12345 -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: 10s -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: 30M -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: 12h -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: 2D -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: 4w -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: -123 -- invalid ERROR:tests.tickets.ticket47669_test:Expectedly failed to add nsslapd-changelogtrim-interval: -123 to cn=changelog5,cn=config: error Server is unwilling to perform INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogtrim-interval: xyz -- invalid ERROR:tests.tickets.ticket47669_test:Expectedly failed to add nsslapd-changelogtrim-interval: xyz to cn=changelog5,cn=config: error Server is unwilling to perform
Passed tickets/ticket47669_test.py::test_ticket47669_changelog_compactdbinterval 0.03
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47669_test:3. Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config INFO:lib389:Bind as cn=Directory Manager INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: 12345 -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: 10s -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: 30M -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: 12h -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: 2D -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: 4w -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: -123 -- invalid ERROR:tests.tickets.ticket47669_test:Expectedly failed to add nsslapd-changelogcompactdb-interval: -123 to cn=changelog5,cn=config: error Server is unwilling to perform INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogcompactdb-interval: xyz -- invalid ERROR:tests.tickets.ticket47669_test:Expectedly failed to add nsslapd-changelogcompactdb-interval: xyz to cn=changelog5,cn=config: error Server is unwilling to perform
Passed tickets/ticket47669_test.py::test_ticket47669_retrochangelog_maxage 0.04
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47669_test:4. Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config INFO:lib389:Bind as cn=Directory Manager INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 12345 -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 10s -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 30M -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 12h -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 2D -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: 4w -- valid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: -123 -- invalid INFO:tests.tickets.ticket47669_test:Test nsslapd-changelogmaxage: xyz -- invalid INFO:lib389:ticket47669 was successfully verified.
Passed tickets/ticket47714_test.py::test_ticket47714_init 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime. INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Adding Account Policy entry: cn=Account Inactivation Policy,dc=example,dc=com ###################### INFO:lib389.utils: ######################### Adding Test User entry: uid=ticket47714user,dc=example,dc=com ######################
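The "Account Inactivation Policy" entry the log adds is a small accountpolicy object. A sketch of such an entry; the objectclass list, inactivity limit and connection details are assumptions made for illustration, not taken from the test itself.

import ldap
import ldap.modlist

POLICY_DN = "cn=Account Inactivation Policy,dc=example,dc=com"

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

conn.add_s(POLICY_DN, ldap.modlist.addModlist({
    "objectClass": [b"top", b"ldapsubentry", b"extensibleObject", b"accountpolicy"],
    "accountInactivityLimit": [b"3600"],                  # seconds; illustrative value
    "cn": [b"Account Inactivation Policy"],
}))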
Passed tickets/ticket47781_test.py::test_ticket47781 3.32
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47781_test:Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data INFO:tests.tickets.ticket47781_test:Setting up replication... INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}oTzi+xEy1ArwErnPwmhQFn5/oPquisHG08FEohBj/VXcbnNeRmp/PZgHwkObD/+q9VH9HOA4d5iSyT1K8incbXZevUtckYZI INFO:tests.tickets.ticket47781_test:Adding two entries... INFO:tests.tickets.ticket47781_test:Exporting replication ldif... INFO:lib389:Export task export_03152017_042605 for file /tmp/export.ldif completed successfully INFO:tests.tickets.ticket47781_test:Restarting server... INFO:tests.tickets.ticket47781_test:Import replication LDIF file... INFO:lib389:Import task import_03152017_042606 for file /tmp/export.ldif completed successfully INFO:tests.tickets.ticket47781_test:Search for tombstone entries(should find one and not hang)...
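The export/import round trip in this log goes through cn=tasks entries: an export task that keeps the replication metadata, then an import task that reloads the file. A sketch of the export half; the task cn, backend name, file path and connection details are assumptions (the path matches the /tmp/export.ldif seen in the log).

import ldap
import ldap.modlist

EXPORT_TASK = "cn=example_export,cn=export,cn=tasks,cn=config"  # illustrative cn

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

conn.add_s(EXPORT_TASK, ldap.modlist.addModlist({
    "objectClass": [b"top", b"extensibleObject"],
    "cn": [b"example_export"],
    "nsInstance": [b"userRoot"],                          # backend to export
    "nsFilename": [b"/tmp/export.ldif"],                  # output path, as in the log
    "nsExportReplica": [b"true"],                         # keep replication metadata
}))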
Passed tickets/ticket47808_test.py::test_ticket47808_run 1.18
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389:Bind as cn=Directory Manager INFO:lib389: ######################### SETUP ATTR UNIQ PLUGIN ###################### INFO:lib389: ######################### ADD USER 1 ###################### INFO:lib389:Try to add Add dn: cn=test_entry 1, dc=example,dc=com cn: test_entry 1 objectclass: top objectclass: person sn: test_entry : dn: cn=test_entry 1, dc=example,dc=com cn: test_entry 1 objectclass: top objectclass: person sn: test_entry INFO:lib389: ######################### Restart Server ###################### INFO:lib389: ######################### ADD USER 2 ###################### INFO:lib389:Try to add Add dn: cn=test_entry 2, dc=example,dc=com cn: test_entry 2 objectclass: top objectclass: person sn: test_entry : dn: cn=test_entry 2, dc=example,dc=com cn: test_entry 2 objectclass: top objectclass: person sn: test_entry WARNING:lib389:Adding cn=test_entry 2, dc=example,dc=com failed INFO:lib389: ######################### IS SERVER UP? ###################### INFO:lib389:Yes, it's up. INFO:lib389: ######################### CHECK USER 2 NOT ADDED ###################### INFO:lib389:Try to search cn=test_entry 2, dc=example,dc=com INFO:lib389:Found none INFO:lib389: ######################### DELETE USER 1 ###################### INFO:lib389:Try to delete cn=test_entry 1, dc=example,dc=com INFO:tests.tickets.ticket47808_test:Testcase PASSED
Passed tickets/ticket47815_test.py::test_ticket47815 5.29
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389.utils:Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache INFO:lib389.utils:Adding automember config INFO:lib389.utils:Adding automember group INFO:lib389.utils:Adding invalid entry
Passed tickets/ticket47819_test.py::test_ticket47819 34.13
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47819_test:Testing Ticket 47819 - Test precise tombstone purging INFO:tests.tickets.ticket47819_test:Setting up replication... INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}KhuJU4kq9oF+HQgDFVjbcazeJ1d+ne29RMam4CquOsjetoTXhBQzVCfSrMFIDyqvk2dBMGpEGZqx7L8J1MWhp4u2WI0zRlL0 INFO:tests.tickets.ticket47819_test:Part 1: Add and then delete an entry to create a tombstone... INFO:tests.tickets.ticket47819_test:Search for tombstone entries... INFO:tests.tickets.ticket47819_test:Part 1 - passed INFO:tests.tickets.ticket47819_test:Part 2: Exporting replication ldif... INFO:lib389:Export task export_03152017_042634 for file /tmp/export.ldif completed successfully INFO:tests.tickets.ticket47819_test:Import replication LDIF file... INFO:lib389:Import task import_03152017_042639 for file /tmp/export.ldif completed successfully INFO:tests.tickets.ticket47819_test:Search for tombstone entries... INFO:tests.tickets.ticket47819_test:Part 2 - passed INFO:tests.tickets.ticket47819_test:Part 3: test the fixup task INFO:lib389:tombstone fixup task fixupTombstone_03152017_042642 for backend userRoot completed successfully INFO:tests.tickets.ticket47819_test:Search for tombstone entries... INFO:lib389:tombstone fixup task fixupTombstone_03152017_042645 for backend userRoot completed successfully INFO:tests.tickets.ticket47819_test:Search for tombstone entries... INFO:tests.tickets.ticket47819_test:Part 3 - passed INFO:tests.tickets.ticket47819_test:Part 4: test tombstone purging... INFO:tests.tickets.ticket47819_test:Wait for tombstone purge interval to pass... INFO:tests.tickets.ticket47819_test:Perform an update to help trigger tombstone purging... INFO:tests.tickets.ticket47819_test:Wait for tombstone purge interval to pass again... INFO:tests.tickets.ticket47819_test:Search for tombstone entries... INFO:tests.tickets.ticket47819_test:Part 4 - passed
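Part 4 of this log depends on a short tombstone purge interval on the replica entry so that purging is observable within the test run. A sketch of that configuration step; the attribute names are the standard replica attributes, while the replica DN quoting, the five-second values and the connection details are assumptions.

import ldap

# Replica entry for the dc=example,dc=com suffix (note the quoted suffix in the DN).
REPLICA_DN = 'cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config'

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

conn.modify_s(REPLICA_DN, [
    (ldap.MOD_REPLACE, "nsds5ReplicaPurgeDelay", b"5"),              # seconds
    (ldap.MOD_REPLACE, "nsds5ReplicaTombstonePurgeInterval", b"5"),  # seconds
])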
Passed tickets/ticket47823_test.py::test_ticket47823_init 7.41
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed tickets/ticket47823_test.py::test_ticket47823_one_container_add 8.86
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With former config (args), check attribute uniqueness with 'cn' (ADD) INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness not enforced: create the entries INFO:lib389:Uniqueness enforced: checks second entry is rejected INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With new config (args), check attribute uniqueness with 'cn' (ADD) INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness not enforced: create the entries INFO:lib389:Uniqueness enforced: checks second entry is rejected
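"Former config (args)" and "new config" in these log blocks refer to the two ways the attribute uniqueness plugin can be configured: positional nsslapd-pluginarg* values versus the named uniqueness-attribute-name / uniqueness-subtrees attributes. A sketch of both shapes on an assumed plugin entry; the DN, subtree values and connection details are illustrative.

import ldap

UNIQ_PLUGIN = "cn=attribute uniqueness,cn=plugins,cn=config"   # assumed plugin entry

conn = ldap.initialize("ldap://localhost:389")           # assumed host/port
conn.simple_bind_s("cn=Directory Manager", "password")   # assumed credentials

# Former style: positional plugin arguments (attribute first, then the subtree).
old_style = [
    (ldap.MOD_REPLACE, "nsslapd-pluginarg0", b"cn"),
    (ldap.MOD_REPLACE, "nsslapd-pluginarg1", b"ou=people,dc=example,dc=com"),
]

# New style: named attributes, which also allow several subtrees at once.
new_style = [
    (ldap.MOD_REPLACE, "uniqueness-attribute-name", b"cn"),
    (ldap.MOD_REPLACE, "uniqueness-subtrees",
     [b"ou=people,dc=example,dc=com", b"ou=groups,dc=example,dc=com"]),
]

# Only one style should be present on the entry at a time, and the plugin
# re-reads its configuration on server restart.
conn.modify_s(UNIQ_PLUGIN, new_style)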
Passed tickets/ticket47823_test.py::test_ticket47823_one_container_mod 4.42
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With former config (args), check attribute uniqueness with 'cn' (MOD) INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness enforced: checks MOD ADD entry is rejected INFO:lib389:Uniqueness enforced: checks MOD REPLACE entry is rejected INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With new config (args), check attribute uniqueness with 'cn' (MOD) INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness enforced: checks MOD ADD entry is rejected INFO:lib389:Uniqueness enforced: checks MOD REPLACE entry is rejected
Passed tickets/ticket47823_test.py::test_ticket47823_one_container_modrdn 4.42
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With former config (args), check attribute uniqueness with 'cn' (MODRDN) INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness enforced: checks MODRDN entry is rejected INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With former config (args), check attribute uniqueness with 'cn' (MODRDN) INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness enforced: checks MODRDN entry is rejected
Passed tickets/ticket47823_test.py::test_ticket47823_multi_containers_add 4.43
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With former config (args), check attribute uniqueness with 'cn' (ADD) INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With new config (args), check attribute uniqueness with 'cn' (ADD) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47823_test.py::test_ticket47823_multi_containers_mod 4.47
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness not enforced: if same 'cn' modified (add/replace) on separated containers INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness not enforced: if same 'cn' modified (add/replace) on separated containers
Passed tickets/ticket47823_test.py::test_ticket47823_multi_containers_modrdn 4.44
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Uniqueness not enforced: checks MODRDN entry is accepted on separated containers INFO:lib389:Uniqueness not enforced: checks MODRDN entry is accepted on separated containers
Passed tickets/ticket47823_test.py::test_ticket47823_across_multi_containers_add 2.20
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47823_test.py::test_ticket47823_across_multi_containers_mod 2.22
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47823_test.py::test_ticket47823_across_multi_containers_modrdn 2.21
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47823_test.py::test_ticket47823_invalid_config_1 4.62
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Invalid config (old): arg0 is missing INFO:lib389:####### INFO:lib389:############################################### Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
Passed tickets/ticket47823_test.py::test_ticket47823_invalid_config_2 4.45
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Invalid config (old): arg1 is missing INFO:lib389:####### INFO:lib389:############################################### Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
Passed tickets/ticket47823_test.py::test_ticket47823_invalid_config_3 4.61
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Invalid config (old): arg0 is missing but new config attrname exists INFO:lib389:####### INFO:lib389:############################################### Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
Passed tickets/ticket47823_test.py::test_ticket47823_invalid_config_4 4.45
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Invalid config (old): arg1 is missing but new config exist INFO:lib389:####### INFO:lib389:############################################### Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
Passed tickets/ticket47823_test.py::test_ticket47823_invalid_config_5 4.43
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Invalid config (new): uniqueness-attribute-name is missing INFO:lib389:####### INFO:lib389:############################################### Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
Passed tickets/ticket47823_test.py::test_ticket47823_invalid_config_6 4.44
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Invalid config (new): uniqueness-subtrees is missing INFO:lib389:####### INFO:lib389:############################################### Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
Passed tickets/ticket47823_test.py::test_ticket47823_invalid_config_7 4.60
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Invalid config (new): uniqueness-subtrees are invalid INFO:lib389:####### INFO:lib389:############################################### Job for dirsrv@standalone_1.service failed because the control process exited with error code. See "systemctl status dirsrv@standalone_1.service" and "journalctl -xe" for details.
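A minimal sketch (python-ldap, not the lib389 helpers used by this suite) of the new-style Attribute Uniqueness configuration these invalid-config cases exercise. The attribute names come from the banners above (uniqueness-attribute-name, uniqueness-subtrees); the plugin DN, bind credentials, and container DNs are illustrative assumptions.

import ldap

# Assumed plugin entry DN and credentials -- not taken from this report.
PLUGIN_DN = 'cn=attribute uniqueness,cn=plugins,cn=config'

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# New-style config: one attribute name plus one or more uniqueness-subtrees values.
# Omitting either attribute is what the "Invalid config (new)" cases above check.
conn.modify_s(PLUGIN_DN, [
    (ldap.MOD_REPLACE, 'uniqueness-attribute-name', ['cn']),
    (ldap.MOD_REPLACE, 'uniqueness-subtrees', ['ou=containerA,dc=example,dc=com',
                                               'ou=containerB,dc=example,dc=com']),
    (ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', ['on']),
])
conn.unbind_s()

As the systemctl messages above show, an invalid combination is caught when the plugin starts, so the failure mode is a refused dirsrv@standalone_1.service restart rather than an LDAP error at modify time.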
Passed tickets/ticket47828_test.py::test_ticket47828_init 2.21
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed tickets/ticket47828_test.py::test_ticket47828_run_0 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_1 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_2 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_3 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_4 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Exclude the provisioning container INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_5 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_6 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_7 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_8 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_9 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_10 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_11 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Exclude (in addition) the dummy container INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_12 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_13 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_14 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_15 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_16 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_17 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_18 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Exclude PROVISIONING and a wrong container INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_19 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_20 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_21 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_22 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_23 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_24 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_25 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Exclude a wrong container INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_26 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_27 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_28 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_29 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_30 0.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47828_test.py::test_ticket47828_run_31 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) INFO:lib389:####### INFO:lib389:###############################################
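The ticket47828 cases above toggle an exclude scope on the DNA configuration so that staged and dummy entries keep their magic value instead of getting a number allocated. A minimal sketch, assuming python-ldap; the config entry DN, the container DN, and in particular the dnaExcludeScope attribute name are assumptions made for illustration, not values taken from this report.

import ldap

# Assumed DNA shared config DN -- illustrative only.
DNA_CONFIG_DN = 'cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config'

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# Exclude the provisioning container so entries added there keep the magic value.
conn.modify_s(DNA_CONFIG_DN, [
    (ldap.MOD_REPLACE, 'dnaExcludeScope', ['cn=provisioning,cn=in,dc=example,dc=com']),
])
conn.unbind_s()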
Passed tickets/ticket47829_test.py::test_ticket47829_init 2.20
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed tickets/ticket47829_test.py::test_ticket47829_mod_active_user_1 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an active user to an active group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389: delete entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_active_user_2 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an Active user to a Stage group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:to group cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:!!!!!!! cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com: member ->cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389: delete entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:to group cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_active_user_3 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an Active user to a out of scope group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:to group cn=out group,cn=out,dc=example,dc=com INFO:lib389:!!!!!!! cn=out group,cn=out,dc=example,dc=com: member ->cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389: delete entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:to group cn=out group,cn=out,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_stage_user_1 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an Stage user to a Active group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389: delete entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_stage_user_2 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an Stage user to a Stage group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:to group cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:!!!!!!! cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389: delete entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:to group cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_stage_user_3 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an Stage user to a out of scope group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:to group cn=out group,cn=out,dc=example,dc=com INFO:lib389:!!!!!!! cn=out group,cn=out,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389: delete entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:to group cn=out group,cn=out,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_out_user_1 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an out of scope user to an active group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=out guy,cn=out,dc=example,dc=com INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=out guy,cn=out,dc=example,dc=com INFO:lib389: delete entry cn=out guy,cn=out,dc=example,dc=com INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_out_user_2 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an out of scope user to a Stage group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=out guy,cn=out,dc=example,dc=com INFO:lib389:to group cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com INFO:lib389:!!!!!!! cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com: member ->cn=out guy,cn=out,dc=example,dc=com INFO:lib389: delete entry cn=out guy,cn=out,dc=example,dc=com INFO:lib389:to group cn=stage group,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_out_user_3 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: add an out of scope user to an out of scope group INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: add entry cn=out guy,cn=out,dc=example,dc=com INFO:lib389:to group cn=out group,cn=out,dc=example,dc=com INFO:lib389:!!!!!!! cn=out group,cn=out,dc=example,dc=com: member ->cn=out guy,cn=out,dc=example,dc=com INFO:lib389: delete entry cn=out guy,cn=out,dc=example,dc=com INFO:lib389:to group cn=out group,cn=out,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_active_user_modrdn_active_user_1 2.02
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Active user to a Active group. Then move Active user to Active
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=xactive guy ######################
INFO:lib389:!!!!!!! cn=xactive guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=xactive guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=active guy ######################
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: delete entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_mod_active_user_modrdn_stage_user_1 1.02
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Active user to a Active group. Then move Active user to Stage
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=active guy ######################
INFO:lib389: ######################### MODRDN cn=active guy ######################
Passed tickets/ticket47829_test.py::test_ticket47829_mod_active_user_modrdn_out_user_1 1.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Active user to a Active group. Then move Active user to out of scope
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=active guy ######################
INFO:lib389: ######################### MODRDN cn=active guy ######################
Passed tickets/ticket47829_test.py::test_ticket47829_mod_modrdn_1 1.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Stage user to a Active group. Then move Stage user to Active
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=stage guy ######################
INFO:lib389:!!!!!!! cn=stage guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=stage guy ######################
Passed tickets/ticket47829_test.py::test_ticket47829_mod_stage_user_modrdn_active_user_1 1.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Stage user to a Active group. Then move Stage user to Active
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=stage guy ######################
INFO:lib389:!!!!!!! cn=stage guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=stage guy ######################
Passed tickets/ticket47829_test.py::test_ticket47829_mod_stage_user_modrdn_stage_user_1 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### add an Stage user to a Active group. Then move Stage user to Stage INFO:lib389:####### INFO:lib389:############################################### INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Return because it requires a fix for 47833 INFO:lib389:####### INFO:lib389:###############################################
Passed tickets/ticket47829_test.py::test_ticket47829_indirect_active_group_1 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Active group (G1) to an active group (G0). Then add active user to G1
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=indirect active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: delete entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
Passed tickets/ticket47829_test.py::test_ticket47829_indirect_active_group_2 1.02
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=indirect active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=active guy ######################
INFO:lib389: ######################### MODRDN cn=active guy ######################
Passed tickets/ticket47829_test.py::test_ticket47829_indirect_active_group_3 1.02
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=active guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=indirect active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=active guy ######################
INFO:lib389: ######################### MODRDN cn=active guy ######################
Passed tickets/ticket47829_test.py::test_ticket47829_indirect_active_group_4 1.02
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. Then move it back
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=indirect active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=stage guy ######################
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=indirect active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=stage guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=stage guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=indirect active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=stage guy,cn=accounts,cn=in,dc=example,dc=com: memberof->cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=stage guy ######################
INFO:lib389:!!!!!!! cn=indirect active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=active group,cn=accounts,cn=in,dc=example,dc=com
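The ticket47829 log lines above trace member/memberOf pairs as users are added to groups across the active, staged, and out-of-scope containers. A minimal sketch of that kind of check, assuming python-ldap and the DNs shown in the log; it is not the lib389 helper the suite itself uses.

import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

user_dn = 'cn=active guy,cn=accounts,cn=in,dc=example,dc=com'
group_dn = 'cn=active group,cn=accounts,cn=in,dc=example,dc=com'

# Add the user to the group ...
conn.modify_s(group_dn, [(ldap.MOD_ADD, 'member', [user_dn])])

# ... and verify that the memberOf plugin wrote the back-link on the user.
# (The attribute-name case in the result dict follows the server schema.)
dn, attrs = conn.search_s(user_dn, ldap.SCOPE_BASE, '(objectClass=*)', ['memberOf'])[0]
assert group_dn in attrs.get('memberOf', [])

# Clean up, as the tests above do.
conn.modify_s(group_dn, [(ldap.MOD_DELETE, 'member', [user_dn])])
conn.unbind_s()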
Passed tickets/ticket47833_test.py::test_ticket47829_init 2.19
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed tickets/ticket47833_test.py::test_ticket47829_mod_stage_user_modrdn_stage_user_1 1.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### add an Stage user to a Active group. Then move Stage user to Stage
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389: add entry cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389:to group cn=active group,cn=accounts,cn=in,dc=example,dc=com
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
INFO:lib389: ######################### MODRDN cn=xstage guy ######################
INFO:lib389:!!!!!!! cn=active group,cn=accounts,cn=in,dc=example,dc=com: member ->cn=stage guy,cn=staged users,cn=provisioning,cn=in,dc=example,dc=com
Passed tickets/ticket47838_test.py::test_47838_init 2.49
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stdout call -----------------------------
/etc/dirsrv/slapd-standalone_1/pwdfile.txt
51f704aae71f714dc4bae00cbef30870cd1098ce
Is this a CA certificate [y/N]?
Enter the path length constraint, enter to skip [<0 for unlimited path]: >
Is this a critical extension [y/N]?
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:#######
INFO:lib389:####### Testing Ticket 47838 - harden the list of ciphers available by default
INFO:lib389:#######
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Checking existing certs ######################
certutil: Could not find cert: CA certificate : PR_FILE_NOT_FOUND_ERROR: File not found
certutil: Could not find cert: Server-Cert : PR_FILE_NOT_FOUND_ERROR: File not found
INFO:lib389.utils: ######################### Create a password file ######################
INFO:lib389.utils: ######################### Create a noise file ######################
INFO:lib389.utils: ######################### Create key3.db and cert8.db database ######################
INFO:lib389.utils: ######################### Creating encryption key for CA ######################
Generating key. This may take a few moments...
INFO:lib389.utils: ######################### Creating self-signed CA certificate ######################
Generating key. This may take a few moments...
INFO:lib389.utils: ######################### Exporting the CA certificate to cacert.asc ######################
INFO:lib389.utils: ######################### Generate the server certificate ######################
Generating key. This may take a few moments...
Notice: Trust flag u is set automatically if the private key is present.
INFO:lib389.utils: ######################### create the pin file ######################
INFO:lib389.utils: ######################### enable SSL in the directory server with all ciphers ######################
Passed tickets/ticket47838_test.py::test_47838_run_0 6.81
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Enabled ciphers: 64 INFO:lib389.utils:Disabled ciphers: 7 INFO:lib389.utils:Weak ciphers: 17 INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 64
Passed tickets/ticket47838_test.py::test_47838_run_1 3.28
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Enabled ciphers: 47 INFO:lib389.utils:Disabled ciphers: 24 INFO:lib389.utils:Weak ciphers: 17 INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 47
Passed tickets/ticket47838_test.py::test_47838_run_2 2.28
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Enabled ciphers: 2 INFO:lib389.utils:Disabled ciphers: 69 INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 2
Passed tickets/ticket47838_test.py::test_47838_run_3 2.25
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 4 - Check the ciphers availability for "-all" INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Enabled ciphers: 0 INFO:lib389.utils:Disabling SSL message?: INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 0
Passed tickets/ticket47838_test.py::test_47838_run_6 2.27
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 7 - Check nsSSL3Ciphers: +all,-tls_dhe_rsa_aes_128_gcm_sha with default allowWeakCipher INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Enabled ciphers: 46 INFO:lib389.utils:Disabled ciphers: 25 INFO:lib389.utils:ALL Ecount: 47 INFO:lib389.utils:ALL Dcount: 24 INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 46
Passed tickets/ticket47838_test.py::test_47838_run_7 2.29
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Enabled ciphers: 1 INFO:lib389.utils:Disabled ciphers: 70 INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 1
Passed tickets/ticket47838_test.py::test_47838_run_11 2.10
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Expected error message: INFO:lib389.utils: INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 0
Passed tickets/ticket47838_test.py::test_47928_run_0 7.24
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 13 - No SSL version config parameters INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Expected message: INFO:lib389.utils:
Passed tickets/ticket47838_test.py::test_47928_run_1 1.25
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 14 - No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Expected message: INFO:lib389.utils: INFO:lib389.utils:Expected message: INFO:lib389.utils:
Passed tickets/ticket47838_test.py::test_47928_run_2 2.26
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 15 - nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2 INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Expected message: INFO:lib389.utils: INFO:lib389.utils:Expected message: INFO:lib389.utils: INFO:lib389.utils:Expected message: INFO:lib389.utils:
Passed tickets/ticket47838_test.py::test_47928_run_3 2.25
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 16 - nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2 INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Expected message: INFO:lib389.utils: INFO:lib389.utils:Expected message: INFO:lib389.utils: INFO:lib389.utils:Expected message: INFO:lib389.utils:
Passed tickets/ticket47838_test.py::test_47838_run_last 2.22
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### Test Case 17 - Check nsSSL3Ciphers: all, which is invalid INFO:lib389:####### INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Expected error message: INFO:lib389.utils: INFO:lib389.utils:Checking nsSSLEnabledCiphers... INFO:lib389:1 results INFO:lib389:Results: INFO:lib389:dn: cn=encryption,cn=config INFO:lib389:enabledCipherCount: 0 INFO:lib389:ticket47838, 47880, 47908, 47928 were successfully verified.
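The ticket47838/47928 cases above set nsSSL3Ciphers and the SSL version bounds on cn=encryption,cn=config, restart the server, and then count nsSSLEnabledCiphers. A minimal sketch of one such step (Test Case 3's cipher string), assuming python-ldap; the restart itself is elided and the bind credentials are illustrative.

import ldap

ENC_DN = 'cn=encryption,cn=config'

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# Restrict the cipher list and pin the protocol range, as the cases above do.
conn.modify_s(ENC_DN, [
    (ldap.MOD_REPLACE, 'nsSSL3Ciphers', ['+rsa_aes_128_sha,+rsa_aes_256_sha']),
    (ldap.MOD_REPLACE, 'sslVersionMin', ['TLS1.1']),
    (ldap.MOD_REPLACE, 'sslVersionMax', ['TLS1.2']),
])

# (restart dirsrv here; the effective cipher set is only recomputed at startup)

dn, attrs = conn.search_s(ENC_DN, ldap.SCOPE_BASE, '(objectClass=*)', ['nsSSLEnabledCiphers'])[0]
enabled = attrs.get('nsSSLEnabledCiphers', [])
print('enabledCipherCount: %d' % len(enabled))   # the report shows 2 for this cipher string
conn.unbind_s()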
Passed tickets/ticket47910_test.py::test_ticket47910_logconv_start_end_positive 0.43
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
---------------------------- Captured stderr setup -----------------------------
INFO:lib389.utils:Disable access log buffering
INFO:lib389.utils:Do an ldapsearch operation
INFO:lib389.utils:sleep for some time so that the access log file gets generated
----------------------------- Captured stderr call -----------------------------
INFO:lib389.utils:Running test_ticket47910 - Execute logconv.pl -S -E with random values
INFO:lib389.utils:taking current time with offset of 2 mins and formatting it to feed -S
INFO:lib389.utils:taking current time with offset of 2 mins and formatting it to feed -E
INFO:lib389.utils:Executing logconv.pl with -S and -E
INFO:lib389.utils:Executing logconv.pl with -S current time and -E end time
INFO:lib389.utils:/usr/bin/logconv.pl -S [15/Mar/2017:04:28:55] -E [15/Mar/2017:04:32:55] /var/log/dirsrv/slapd-standalone_1/access
INFO:lib389.utils:standard output
Access Log Analyzer 8.2
Command: logconv.pl /var/log/dirsrv/slapd-standalone_1/access
Processing 1 Access Log(s)...
[001] /var/log/dirsrv/slapd-standalone_1/access size (bytes): 690
Total Log Lines Analysed: 7
----------- Access Log Output ------------
Start of Logs: 15/Mar/2017:04:28:55
End of Logs: 15/Mar/2017:04:30:54.815760043
Processed Log Time: 0 Hours, 1 Minutes, 59.815757824 Seconds
Restarts: 1
Peak Concurrent Connections: 1
Total Operations: 3
Total Results: 3
Overall Performance: 100.0%
Total Connections: 1 (0.01/sec) (0.50/min)
- LDAP Connections: 1 (0.01/sec) (0.50/min)
- LDAPI Connections: 0 (0.00/sec) (0.00/min)
- LDAPS Connections: 0 (0.00/sec) (0.00/min)
- StartTLS Extended Ops: 0 (0.00/sec) (0.00/min)
Searches: 1 (0.01/sec) (0.50/min)
Modifications: 1 (0.01/sec) (0.50/min)
Adds: 0 (0.00/sec) (0.00/min)
Deletes: 0 (0.00/sec) (0.00/min)
Mod RDNs: 0 (0.00/sec) (0.00/min)
Compares: 0 (0.00/sec) (0.00/min)
Binds: 1 (0.01/sec) (0.50/min)
Proxied Auth Operations: 0
Persistent Searches: 0
Internal Operations: 0
Entry Operations: 0
Extended Operations: 0
Abandoned Requests: 0
Smart Referrals Received: 0
VLV Operations: 0
VLV Unindexed Searches: 0
VLV Unindexed Components: 0
SORT Operations: 0
Entire Search Base Queries: 1
Paged Searches: 0
Unindexed Searches: 0
Unindexed Components: 1
FDs Taken: 1
FDs Returned: 0
Highest FD Taken: 64
Broken Pipes: 0
Connections Reset By Peer: 0
Resource Unavailable: 0
Max BER Size Exceeded: 0
Binds: 1
Unbinds: 0
- LDAP v2 Binds: 0
- LDAP v3 Binds: 1
- AUTOBINDs: 0
- SSL Client Binds: 0
- Failed SSL Client Binds: 0
- SASL Binds: 0
- Directory Manager Binds: 0
- Anonymous Binds: 0
- Other Binds: 1
Cleaning up temp files... Done.
INFO:lib389.utils:standard errors
Passed tickets/ticket47910_test.py::test_ticket47910_logconv_start_end_negative 0.12
----------------------------- Captured stderr call -----------------------------
INFO:lib389.utils:Running test_ticket47910 - Execute logconv.pl -S -E with starttime>endtime
INFO:lib389.utils:taking current time with offset of 2 mins and formatting it to feed -S
INFO:lib389.utils:taking current time with offset of 2 mins and formatting it to feed -E
INFO:lib389.utils:Executing logconv.pl with -S and -E
INFO:lib389.utils:Executing logconv.pl with -S current time and -E end time
INFO:lib389.utils:/usr/bin/logconv.pl -S [15/Mar/2017:04:32:56] -E [15/Mar/2017:04:28:56] /var/log/dirsrv/slapd-standalone_1/access
INFO:lib389.utils:standard output
Access Log Analyzer 8.2
Command: logconv.pl /var/log/dirsrv/slapd-standalone_1/access
Start time ([15/Mar/2017:04:32:56]) is greater than end time ([15/Mar/2017:04:28:56])!
Cleaning up temp files... Done.
INFO:lib389.utils:standard errors
Passed tickets/ticket47910_test.py::test_ticket47910_logconv_start_end_invalid 0.12
----------------------------- Captured stderr call -----------------------------
INFO:lib389.utils:Running test_ticket47910 - Execute logconv.pl -S -E with invalid timestamp
INFO:lib389.utils:Set start time and end time to invalid values
INFO:lib389.utils:Executing logconv.pl with -S and -E
INFO:lib389.utils:Executing logconv.pl with -S current time and -E end time
INFO:lib389.utils:/usr/bin/logconv.pl -S invalid -E invalid /var/log/dirsrv/slapd-standalone_1/access
INFO:lib389.utils:standard output
Access Log Analyzer 8.2
Command: logconv.pl /var/log/dirsrv/slapd-standalone_1/access
The date string (invalid) is invalid, exiting...
Cleaning up temp files... Done.
INFO:lib389.utils:standard errors
Passed tickets/ticket47910_test.py::test_ticket47910_logconv_noaccesslogs 0.09
----------------------------- Captured stderr call -----------------------------
INFO:lib389.utils:Running test_ticket47910 - Execute logconv.pl without access logs
INFO:lib389.utils:taking current time with offset of 2 mins and formatting it to feed -S
INFO:lib389.utils:Executing logconv.pl with -S current time
INFO:lib389.utils:/usr/bin/logconv.pl -S [15/Mar/2017:04:28:56]
INFO:lib389.utils:standard output
There are no access logs specified, or the tool options have not been used correctly!
Cleaning up temp files... Done.
INFO:lib389.utils:standard errors
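The ticket47910 cases build -S/-E timestamps two minutes either side of "now" in the access-log timestamp format and run logconv.pl against the instance access log, exactly as the command lines above show. A minimal stdlib sketch of that invocation; the helper name is invented for illustration.

import subprocess
from datetime import datetime, timedelta

ACCESS_LOG = '/var/log/dirsrv/slapd-standalone_1/access'

def logconv_window(minutes_back, minutes_fwd):
    # Same [dd/Mon/YYYY:HH:MM:SS] format as the commands captured above.
    fmt = '[%d/%b/%Y:%H:%M:%S]'
    start = (datetime.now() - timedelta(minutes=minutes_back)).strftime(fmt)
    end = (datetime.now() + timedelta(minutes=minutes_fwd)).strftime(fmt)
    proc = subprocess.Popen(['/usr/bin/logconv.pl', '-S', start, '-E', end, ACCESS_LOG],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return proc.returncode, out, err

# Positive case: a window around "now"; for the negative case the test swaps
# the offsets so that the start time ends up after the end time.
rc, out, err = logconv_window(2, 2)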
Passed tickets/ticket47920_test.py::test_ticket47920_init 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed tickets/ticket47920_test.py::test_ticket47920_mod_readentry_ctrl 0.00
----------------------------- Captured stdout call -----------------------------
['final description']
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### INFO:lib389:####### MOD: with a readentry control INFO:lib389:####### INFO:lib389:############################################### INFO:lib389:Check the initial value of the entry
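ticket47920 performs a MOD with a post-read ("readentry") control and checks that the returned copy of the entry already carries the new description (the ['final description'] captured above). A minimal sketch, assuming python-ldap's PostReadControl; the target DN and credentials are illustrative.

import ldap
from ldap.controls.readentry import PostReadControl

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

dn = 'cn=test_entry,dc=example,dc=com'                # illustrative DN
ctrl = PostReadControl(criticality=True, attrList=['description'])

# The response control carries the entry as it looks *after* the modify.
_, _, _, resp_ctrls = conn.modify_ext_s(
    dn,
    [(ldap.MOD_REPLACE, 'description', ['final description'])],
    serverctrls=[ctrl])

print(resp_ctrls[0].entry.get('description'))         # -> ['final description']
conn.unbind_s()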
Passed tickets/ticket47921_test.py::test_ticket47921 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47921_test:Test complete
Passed tickets/ticket47927_test.py::test_ticket47927_init 2.19
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists
Passed tickets/ticket47927_test.py::test_ticket47927_one 0.00
----------------------------- Captured stderr call -----------------------------
CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_one: Failed (expected) to set the telephonenumber for cn=test_2,cn=enforced_container,dc=example,dc=com: Constraint violation CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_one: Failed (expected) to set the telephonenumber for cn=test_3,cn=excluded_container,dc=example,dc=com: Constraint violation
Passed tickets/ticket47927_test.py::test_ticket47927_two 2.19
No log output captured.
Passed tickets/ticket47927_test.py::test_ticket47927_three 0.00
----------------------------- Captured stderr call -----------------------------
CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_three: Failed (expected) to set the telephonenumber for cn=test_2,cn=enforced_container,dc=example,dc=com: Constraint violation CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_three: success to set the telephonenumber for cn=test_3,cn=excluded_container,dc=example,dc=com
Passed tickets/ticket47927_test.py::test_ticket47927_four 0.00
----------------------------- Captured stderr call -----------------------------
CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_four: success to set the telephonenumber for cn=test_3,cn=excluded_container,dc=example,dc=com CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_four: Failed (expected) to set the telephonenumber for cn=test_2,cn=enforced_container,dc=example,dc=com: Constraint violation
Passed tickets/ticket47927_test.py::test_ticket47927_five 2.20
No log output captured.
Passed tickets/ticket47927_test.py::test_ticket47927_six 0.00
----------------------------- Captured stderr call -----------------------------
CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_six: Failed (expected) to set the telephonenumber for cn=test_2,cn=enforced_container,dc=example,dc=com: Constraint violation CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_six: success to set the telephonenumber for cn=test_3,cn=excluded_container,dc=example,dc=com CRITICAL:tests.tickets.ticket47927_test:test_ticket47927_six: success to set the telephonenumber for cn=test_4,cn=excluded_bis_container,dc=example,dc=com
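The ticket47927 cases boil down to: setting a duplicate telephonenumber must raise a constraint violation inside the enforced container and succeed inside the excluded ones. A minimal sketch of that check, assuming python-ldap; the DNs are taken from the log, the value is illustrative.

import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

def try_set_phone(dn, value):
    try:
        conn.modify_s(dn, [(ldap.MOD_REPLACE, 'telephonenumber', [value])])
        return 'success'
    except ldap.CONSTRAINT_VIOLATION:
        return 'constraint violation'

# A duplicate inside the enforced container is rejected; the same value
# inside an excluded container is accepted.
print(try_set_phone('cn=test_2,cn=enforced_container,dc=example,dc=com', '123 456 789'))
print(try_set_phone('cn=test_3,cn=excluded_container,dc=example,dc=com', '123 456 789'))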
Passed tickets/ticket47931_test.py::test_ticket47931 13.64
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389:List backend with suffix=dc=deadlock
INFO:lib389:Creating a local backend
INFO:lib389:List backend cn=deadlock,cn=ldbm database,cn=plugins,cn=config
INFO:lib389:Found entry dn: cn=deadlock,cn=ldbm database,cn=plugins,cn=config cn: deadlock nsslapd-cachememsize: 512000 nsslapd-cachesize: -1 nsslapd-directory: /var/lib/dirsrv/slapd-standalone_1/db/deadlock nsslapd-dncachememsize: 16777216 nsslapd-readonly: off nsslapd-require-index: off nsslapd-suffix: dc=deadlock objectClass: top objectClass: extensibleObject objectClass: nsBackendInstance
INFO:lib389:Entry dn: cn="dc=deadlock",cn=mapping tree,cn=config cn: dc=deadlock nsslapd-backend: deadlock nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree
INFO:lib389:Found entry dn: cn=dc\3Ddeadlock,cn=mapping tree,cn=config cn: dc=deadlock nsslapd-backend: deadlock nsslapd-state: backend objectClass: top objectClass: extensibleObject objectClass: nsMappingTree
INFO:tests.tickets.ticket47931_test:Adding members to the group...
INFO:tests.tickets.ticket47931_test:Modify second suffix...
INFO:tests.tickets.ticket47931_test:Finished adding members to the group.
INFO:tests.tickets.ticket47931_test:Finished modifying second suffix
INFO:tests.tickets.ticket47931_test:Test complete
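The backend and mapping-tree entries dumped above are ordinary configuration entries, so the setup lib389 performs here can be sketched as two plain LDAP adds. The sketch below uses only attributes that appear in the log; cache sizes and the db directory are left to server defaults, and the connection details are placeholders:

import ldap
import ldap.modlist

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# ldbm backend instance holding the second suffix
backend = {
    'objectClass': [b'top', b'extensibleObject', b'nsBackendInstance'],
    'cn': [b'deadlock'],
    'nsslapd-suffix': [b'dc=deadlock'],
}
conn.add_s('cn=deadlock,cn=ldbm database,cn=plugins,cn=config',
           ldap.modlist.addModlist(backend))

# mapping tree entry pointing the suffix at that backend
mapping_tree = {
    'objectClass': [b'top', b'extensibleObject', b'nsMappingTree'],
    'cn': [b'dc=deadlock'],
    'nsslapd-backend': [b'deadlock'],
    'nsslapd-state': [b'backend'],
}
conn.add_s('cn="dc=deadlock",cn=mapping tree,cn=config',
           ldap.modlist.addModlist(mapping_tree))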
Passed tickets/ticket47937_test.py::test_ticket47937 5.20
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47937_test:Creating "ou=people"...
INFO:tests.tickets.ticket47937_test:Creating "ou=ranges"...
INFO:tests.tickets.ticket47937_test:Creating "cn=entry"...
INFO:tests.tickets.ticket47937_test:Creating DNA shared config entry...
INFO:tests.tickets.ticket47937_test:Add dna plugin config entry...
INFO:tests.tickets.ticket47937_test:Enable the DNA plugin...
INFO:tests.tickets.ticket47937_test:Restarting the server...
INFO:tests.tickets.ticket47937_test:Apply an invalid attribute to the DNA config(dnaType: foo)...
INFO:tests.tickets.ticket47937_test:Operation failed as expected (error: Server is unwilling to perform)
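The last two log lines are the point of the ticket: once the DNA plugin is configured and enabled, replacing dnaType with an attribute the plugin cannot manage ('foo') has to be rejected with "Server is unwilling to perform". A hedged sketch of that final step; the config entry DN below is an assumption, as the real test creates its own entry under the DNA plugin:

import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# assumed DN for the DNA config entry created earlier in the test
DNA_CONFIG = 'cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config'

try:
    # dnaType must name a numeric attribute the plugin can generate (e.g. uidNumber)
    conn.modify_s(DNA_CONFIG, [(ldap.MOD_REPLACE, 'dnaType', [b'foo'])])
except ldap.UNWILLING_TO_PERFORM as e:
    print('Operation failed as expected (error: %s)' % e)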
Passed tickets/ticket47953_test.py::test_ticket47953 4.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47953_test:Testing Ticket 47953 - Test we can delete aci that has invalid syntax
INFO:lib389:Import task import_03152017_043148 for file /var/lib/dirsrv/slapd-standalone_1/ldif/ticket47953.ldif completed successfully
INFO:tests.tickets.ticket47953_test:Attempting to remove invalid aci...
INFO:tests.tickets.ticket47953_test:Removed invalid aci.
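Removing the broken aci is a plain MOD_DELETE of that exact value from the suffix entry's aci attribute; the point of the ticket is that the server accepts the delete even though it cannot parse the value. A rough sketch, with a placeholder string standing in for the invalid aci imported from ticket47953.ldif:

import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# placeholder for the syntactically invalid aci value loaded from the LDIF
bad_aci = b'(targetattr != userPassword)(version 3.0; acl "broken"; allow (write) userdn = "ldap:///self")'

# delete that one value; the remaining acis on the entry are untouched
conn.modify_s('dc=example,dc=com', [(ldap.MOD_DELETE, 'aci', [bad_aci])])
print('Removed invalid aci.')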
Passed tickets/ticket47963_test.py::test_ticket47963 5.20
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47963_test:Test complete
Passed tickets/ticket47970_test.py::test_ticket47970 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47970_test:Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout
INFO:tests.tickets.ticket47970_test:account lockout enabled.
INFO:tests.tickets.ticket47970_test:passwordMaxFailure set.
INFO:tests.tickets.ticket47970_test:SASL Bind failed as expected
INFO:tests.tickets.ticket47970_test:Root DSE was correctly not updated
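Account lockout is controlled by the passwordLockout and passwordMaxFailure attributes on cn=config; the test switches lockout on, provokes a failing SASL bind, and then checks that no lockout bookkeeping was updated. A hedged sketch of the configuration step plus a deliberately failing SASL bind; the test's own SASL mechanism and follow-up checks are not shown, and DIGEST-MD5 with bogus credentials stands in here:

import ldap
import ldap.sasl

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# enable account lockout and cap the number of failed binds
conn.modify_s('cn=config', [
    (ldap.MOD_REPLACE, 'passwordLockout', [b'on']),
    (ldap.MOD_REPLACE, 'passwordMaxFailure', [b'5']),
])

# a SASL bind that is expected to fail; it must not count towards lockout
try:
    auth = ldap.sasl.digest_md5('no-such-user', 'wrong-password')
    conn.sasl_interactive_bind_s('', auth)
except ldap.LDAPError as exc:
    print('SASL Bind failed as expected: %s' % exc)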
Passed tickets/ticket47973_test.py::test_ticket47973 0.94
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47973_test:Testing Ticket 47973 - Test the searches still work as expected during schema reload tasks
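A schema reload is requested by adding a task entry under cn=schema reload task,cn=tasks,cn=config, and the point of the test is that ordinary searches keep answering while that task runs. A minimal sketch of submitting and polling such a task over LDAP; the task name and search suffix below are placeholders, and lib389's own task helper adds more bookkeeping than this:

import time
import ldap
import ldap.modlist

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

task_dn = 'cn=task-example,cn=schema reload task,cn=tasks,cn=config'
task = {'objectClass': [b'top', b'extensibleObject'], 'cn': [b'task-example']}
conn.add_s(task_dn, ldap.modlist.addModlist(task))

# keep issuing ordinary searches while the reload task runs; they should all answer
while True:
    attrs = conn.search_s(task_dn, ldap.SCOPE_BASE, attrlist=['nsTaskExitCode'])[0][1]
    if 'nsTaskExitCode' in attrs:
        break
    conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, '(cn=*)', ['cn'])
    time.sleep(1)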
Passed tickets/ticket47973_test.py::test_ticket47973_case 10.17
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47973_test:Testing Ticket 47973 (case) - Test the cases in the original schema are preserved. INFO:tests.tickets.ticket47973_test:case 1 - Test the cases in the original schema are preserved. INFO:lib389:Schema Reload task (task-03152017_043218) completed successfully INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.0 NAME 'top' ABSTRACT MUST objectClass X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.1 NAME 'alias' SUP top STRUCTURAL MUST aliasedObjectName X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.20.1 NAME 'subschema' AUXILIARY MAY ( dITStructureRules $ nameForms $ ditContentRules $ objectClasses $ attributeTypes $ matchingRules $ matchingRuleUse ) X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.101.120.111 NAME 'extensibleObject' SUP top AUXILIARY X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.11 NAME 'applicationProcess' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.2 NAME 'country' SUP top STRUCTURAL MUST c MAY ( searchGuide $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.344 NAME 'dcObject' SUP top AUXILIARY MUST dc X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.14 NAME 'device' SUP top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.9 NAME 'groupOfNames' SUP top STRUCTURAL MUST cn MAY ( member $ businessCategory $ seeAlso $ owner $ ou $ o $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.17 NAME 'groupOfUniqueNames' SUP top STRUCTURAL MUST cn MAY ( uniqueMember $ businessCategory $ seeAlso $ owner $ ou $ o $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.3 NAME 'locality' SUP top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.4 NAME 'organization' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationalISDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.6 NAME 'person' SUP top STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.7 NAME 'organizationalPerson' SUP person STRUCTURAL MAY ( title $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ internationalISDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ ou $ st $ l ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.8 NAME 'organizationalRole' SUP top STRUCTURAL MUST cn MAY ( x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationalISDNNumber $ facsimileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ street $ postOfficeBox $ postalCode $ 
postalAddress $ physicalDeliveryOfficeName $ ou $ st $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.5 NAME 'organizationalUnit' SUP top STRUCTURAL MUST ou MAY ( businessCategory $ description $ destinationIndicator $ facsimileTelephoneNumber $ internationalISDNNumber $ l $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ preferredDeliveryMethod $ registeredAddress $ searchGuide $ seeAlso $ st $ street $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ userPassword $ x121Address ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.10 NAME 'residentialPerson' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ internationalISDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.3.1 NAME 'uidObject' SUP top AUXILIARY MUST uid X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113719.2.142.6.1.1 NAME 'ldapSubEntry' DESC 'LDAP Subentry class, version 1' SUP top STRUCTURAL MAY cn X-ORIGIN 'LDAP Subentry Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.40 NAME 'directoryServerFeature' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( oid $ cn $ multiLineDescription ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.41 NAME 'nsslapdPlugin' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsslapd-pluginPath $ nsslapd-pluginInitFunc $ nsslapd-pluginType $ nsslapd-pluginId $ nsslapd-pluginVersion $ nsslapd-pluginVendor $ nsslapd-pluginDescription $ nsslapd-pluginEnabled ) MAY ( nsslapd-pluginConfigArea $ nsslapd-plugin-depends-on-type ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsSystemIndex ) MAY ( description $ nsIndexType $ nsMatchingRule $ nsIndexIDListScanLimit ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.109 NAME 'nsBackendInstance' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST CN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST CN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST CN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY ( cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout ) X-ORIGIN 
'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.113 NAME 'nsTombstone' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( nstombstonecsn $ nsParentUniqueId $ nscpEntryDN ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5ReplicaEnabled $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5ReplicaStripAttrs $ nsds5replicaSessionPauseTime $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaFlowControlWindow $ nsds5ReplicaFlowControlPause $ nsDS5ReplicaWaitForAsyncResults $ nsds5ReplicaIgnoreMissingChange ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY cn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.317 NAME 'nsSaslMapping' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsSaslMapRegexString $ nsSaslMapBaseDNTemplate $ nsSaslMapFilterTemplate ) MAY nsSaslMapPriority X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.43 NAME 'nsSNMP' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsSNMPEnabled ) MAY ( nsSNMPOrganization $ nsSNMPLocation $ nsSNMPContact $ nsSNMPDescription $ nsSNMPName $ nsSNMPMasterHost $ nsSNMPMasterPort ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsCertfile $ nsKeyfile $ nsSSL2 $ nsSSL3 $ nsTLS1 $ nsTLS10 $ nsTLS11 $ nsTLS12 $ sslVersionMin $ sslVersionMax $ nsSSLSessionTimeout $ nsSSL3SessionTimeout $ nsSSLClientAuth $ nsSSL2Ciphers $ nsSSL3Ciphers $ nsSSLSupportedCiphers $ allowWeakCipher $ CACertExtractFile $ allowWeakDHParam ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ 
schemaUpdateAttributeReject ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.31 NAME 'groupOfCertificates' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( memberCertificateDescription $ businessCategory $ description $ o $ ou $ owner $ seeAlso ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.33 NAME 'groupOfURLs' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( memberURL $ businessCategory $ description $ o $ ou $ owner $ seeAlso ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.35 NAME 'LDAPServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ l $ ou $ seeAlso $ generation $ changeLogMaximumAge $ changeLogMaximumSize ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.250.3.18 NAME 'cacheObject' DESC 'object that contains the TTL (time to live) attribute type' SUP top STRUCTURAL MAY ttl X-ORIGIN 'LDAP Caching Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.10 NAME 'netscapeServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ serverRoot $ serverProductName $ serverVersionNumber $ installationTimeStamp $ administratorContactInfo $ userpassword $ adminURL $ serverHostName ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.7 NAME 'nsLicenseUser' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( nsLicensedFor $ nsLicenseStartTime $ nsLicenseEndTime ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.1 NAME 'changeLogEntry' DESC 'LDAP changelog objectclass' SUP top STRUCTURAL MUST ( targetdn $ changeTime $ changenumber $ changeType ) MAY ( changes $ newrdn $ deleteoldrdn $ newsuperior ) X-ORIGIN 'Changelog Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.6 NAME 'referral' DESC 'LDAP referrals objectclass' SUP top STRUCTURAL MAY ref X-ORIGIN 'LDAPv3 referrals Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.12 NAME 'passwordObject' DESC 'Netscape defined password policy objectclass' SUP top STRUCTURAL MAY ( pwdpolicysubentry $ passwordExpirationTime $ passwordExpWarned $ passwordRetryCount $ retryCountResetTime $ accountUnlockTime $ passwordHistory $ passwordAllowChangeTime $ passwordGraceUserTime ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.13 NAME 'passwordPolicy' DESC 'Netscape defined password policy objectclass' SUP top STRUCTURAL MAY ( passwordMaxAge $ passwordExp $ passwordMinLength $ passwordKeepHistory $ passwordInHistory $ passwordChange $ passwordWarning $ passwordLockout $ passwordMaxFailure $ passwordResetDuration $ passwordUnlock $ passwordLockoutDuration $ passwordCheckSyntax $ passwordMustChange $ passwordStorageScheme $ passwordMinAge $ passwordResetFailureCount $ passwordGraceLimit $ passwordMinDigits $ passwordMinAlphas $ passwordMinUppers $ passwordMinLowers $ passwordMinSpecials $ passwordMin8bit $ passwordMaxRepeats $ passwordMinCategories $ passwordMinTokenLength $ passwordTrackUpdateTime $ passwordAdminDN ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.30 NAME 'glue' DESC 'Netscape defined objectclass' SUP top 
STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.32 NAME 'netscapeMachineData' DESC 'Netscape defined objectclass' SUP top STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.38 NAME 'vlvSearch' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ vlvBase $ vlvScope $ vlvFilter ) MAY multiLineDescription X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.42 NAME 'vlvIndex' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ vlvSort ) MAY ( vlvEnabled $ vlvUses ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.84 NAME 'cosDefinition' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( costargettree $ costemplatedn $ cosspecifier $ cosattribute $ aci $ cn $ uid ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.93 NAME 'nsRoleDefinition' DESC 'Netscape defined objectclass' SUP ldapSubEntry STRUCTURAL MAY ( description $ nsRoleScopeDN ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.94 NAME 'nsSimpleRoleDefinition' DESC 'Netscape defined objectclass' SUP nsRoleDefinition STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.95 NAME 'nsComplexRoleDefinition' DESC 'Netscape defined objectclass' SUP nsRoleDefinition STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.96 NAME 'nsManagedRoleDefinition' DESC 'Netscape defined objectclass' SUP nsSimpleRoleDefinition STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.97 NAME 'nsFilteredRoleDefinition' DESC 'Netscape defined objectclass' SUP nsComplexRoleDefinition STRUCTURAL MUST nsRoleFilter X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.98 NAME 'nsNestedRoleDefinition' DESC 'Netscape defined objectclass' SUP nsComplexRoleDefinition STRUCTURAL MUST nsRoleDN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.99 NAME 'cosSuperDefinition' DESC 'Netscape defined objectclass' SUP ldapSubEntry STRUCTURAL MUST cosattribute MAY description X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.100 NAME 'cosClassicDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition STRUCTURAL MAY ( cosTemplateDn $ cosspecifier ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.101 NAME 'cosPointerDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition STRUCTURAL MAY cosTemplateDn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.102 NAME 'cosIndirectDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition STRUCTURAL MAY cosIndirectSpecifier X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ 
nsDS5ReplicatedAttributeList $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5replicaSessionPauseTime $ nsds7WindowsReplicaSubtree $ nsds7DirectoryReplicaSubtree $ nsds7NewWinUserSyncEnabled $ nsds7NewWinGroupSyncEnabled $ nsds7WindowsDomain $ nsds7DirsyncCookie $ winSyncInterval $ oneWaySync $ winSyncMoveAction $ nsds5ReplicaEnabled $ winSyncDirectoryFilter $ winSyncWindowsFilter $ winSyncSubtreePair ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.21 NAME 'pkiUser' DESC 'X.509 PKI User' SUP top AUXILIARY MAY userCertificate X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.22 NAME 'pkiCA' DESC 'X.509 PKI Certificate Authority' SUP top AUXILIARY MAY ( cACertificate $ certificateRevocationList $ authorityRevocationList $ crossCertificatePair ) X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.19 NAME 'cRLDistributionPoint' DESC 'X.509 CRL distribution point' SUP top STRUCTURAL MUST cn MAY ( certificateRevocationList $ authorityRevocationList $ deltaRevocationList ) X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.23 NAME 'deltaCRL' DESC 'X.509 delta CRL' SUP top AUXILIARY MAY deltaRevocationList X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'X.521 strong authentication user' SUP top AUXILIARY MUST userCertificate X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.18 NAME 'userSecurityInformation' DESC 'X.521 user security information' SUP top AUXILIARY MAY supportedAlgorithms X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.16 NAME 'certificationAuthority' DESC 'X.509 certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.16.2 NAME 'certificationAuthority-V2' DESC 'X.509 certificate authority, version 2' SUP certificationAuthority AUXILIARY MAY deltaRevocationList X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRUCTURAL MUST uid MAY ( description $ seeAlso $ l $ o $ ou $ host ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STRUCTURAL MUST documentIdentifier MAY ( cn $ description $ seeAlso $ l $ o $ ou $ documentTitle $ documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) 
X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP top STRUCTURAL MUST cn MAY ( description $ l $ o $ ou $ seeAlso $ telephonenumber ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRUCTURAL MUST dc MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description $ o $ associatedName ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' SUP top AUXILIARY MUST associatedDomain X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP country STRUCTURAL MUST co X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.14 NAME 'rFC822localPart' SUP domain STRUCTURAL MAY ( cn $ sn ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTURAL MUST cn MAY ( roomNumber $ description $ seeAlso $ telephoneNumber ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObject' SUP top AUXILIARY MUST userPassword X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' SUP organizationalPerson STRUCTURAL MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayName $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddress $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ pager $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIdentifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) X-ORIGIN 'RFC 2798' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.322 NAME 'autoMemberDefinition' DESC 'Auto Membership Config Definition Entry' SUP top STRUCTURAL MUST ( cn $ autoMemberScope $ autoMemberFilter $ autoMemberGroupingAttr ) MAY ( autoMemberDefaultGroup $ autoMemberDisabled ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.323 NAME 'autoMemberRegexRule' DESC 'Auto Membership Regex Rule Entry' SUP top STRUCTURAL MUST ( cn $ autoMemberTargetGroup ) MAY ( autoMemberExclusiveRegex $ autoMemberInclusiveRegex $ description ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.324 NAME 'dnaPluginConfig' DESC 'DNA plugin configuration' SUP top AUXILIARY MAY ( dnaType $ dnaPrefix $ dnaNextValue $ dnaMaxValue $ dnaInterval $ dnaMagicRegen $ dnaFilter $ dnaScope $ dnaExcludeScope $ dnaSharedCfgDN $ dnaThreshold $ dnaNextRange $ dnaRangeRequestTimeout $ dnaRemoteBindDN $ dnaRemoteBindCred $ cn ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.325 NAME 'dnaSharedConfig' DESC 'DNA Shared Configuration' SUP top AUXILIARY MAY ( dnaHostname $ dnaPortNum $ dnaSecurePortNum $ dnaRemoteBindMethod $ dnaRemoteConnProtocol $ dnaRemainingValues ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.319 NAME 'mepManagedEntry' DESC 'Managed Entries Managed Entry' SUP top AUXILIARY MAY mepManagedBy X-ORIGIN 
'389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.320 NAME 'mepOriginEntry' DESC 'Managed Entries Origin Entry' SUP top AUXILIARY MAY mepManagedEntry X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.321 NAME 'mepTemplateEntry' DESC 'Managed Entries Template Entry' SUP top AUXILIARY MAY ( cn $ mepStaticAttr $ mepMappedAttr $ mepRDNAttr ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST ( cn $ uid $ uidNumber $ gidNumber $ homeDirectory ) MAY ( userPassword $ loginShell $ gecos $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST uid MAY ( userPassword $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarning $ shadowInactive $ shadowExpire $ shadowFlag $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ gidNumber ) MAY ( userPassword $ memberUid $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ ipServicePort $ ipServiceProtocol ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST ( ipHostNumber $ cn ) MAY ( manager $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( ipNetworkNumber $ cn ) MAY ( ipNetmaskNumber $ manager $ l $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST cn MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST cn MAY ( macAddress $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST cn MAY ( bootFile $ bootParameter $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.13 NAME 'nisMap' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST nisMapName MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.129 NAME 'inetDomain' DESC 'Auxiliary class for virtual domain 
nodes' SUP top AUXILIARY MAY ( inetDomainBaseDN $ inetDomainStatus ) X-ORIGIN 'Netscape subscriber interoperability' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.130 NAME 'inetUser' DESC 'Auxiliary class which must be present in an entry for delivery of subscriber services' SUP top AUXILIARY MAY ( uid $ inetUserStatus $ inetUserHTTPURL $ userPassword $ memberOf ) X-ORIGIN 'Netscape subscriber interoperability' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.101.120.141 NAME 'NetscapeLinkedOrganization' AUXILIARY MAY parentOrganization X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.101.120.142 NAME 'NetscapePreferences' AUXILIARY MAY ( preferredLanguage $ preferredLocale $ preferredTimeZone ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.134 NAME 'inetSubscriber' SUP top AUXILIARY MAY ( inetSubscriberAccountId $ inetSubscriberChallenge $ inetSubscriberResponse ) X-ORIGIN 'Netscape subscriber interoperability' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.112 NAME 'inetAdmin' DESC 'Marker for an administrative group or user' SUP top AUXILIARY MAY ( aci $ memberof $ adminrole ) X-ORIGIN 'Netscape Delegated Administrator' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.1 NAME 'javaContainer' DESC 'Container for a Java object' SUP top STRUCTURAL MUST cn X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.4 NAME 'javaObject' DESC 'Java object representation' SUP top ABSTRACT MUST javaClassName MAY ( javaClassNames $ javaCodebase $ javaDoc $ description ) X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.5 NAME 'javaSerializedObject' DESC 'Java serialized object' SUP javaObject AUXILIARY MUST javaSerializedData X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.7 NAME 'javaNamingReference' DESC 'JNDI reference' SUP javaObject AUXILIARY MAY ( javaReferenceAddress $ javaFactory ) X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.8 NAME 'javaMarshalledObject' DESC 'Java marshalled object' SUP javaObject AUXILIARY MUST javaSerializedData X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.3 NAME 'pilotObject' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MAY ( audio $ dITRedirect $ info $ jpegPhoto $ lastModifiedBy $ lastModifiedTime $ manager $ photo $ uniqueIdentifier ) X-ORIGIN 'RFC 1274' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminDomain-oid NAME 'nsAdminDomain' DESC 'Netscape defined objectclass' SUP organizationalUnit STRUCTURAL MAY nsAdminDomainName X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsHost-oid NAME 'nsHost' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( serverHostName $ description $ l $ nsHostLocation $ nsHardwarePlatform $ nsOsVersion ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminGroup-oid NAME 'nsAdminGroup' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsAdminGroupName $ description $ nsConfigRoot $ nsAdminSIEDN ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsApplication-oid NAME 'nsApplication' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsVendor $ description $ nsProductName $ nsNickName $ nsProductVersion $ nsBuildNumber $ nsRevisionNumber $ nsSerialNumber $ nsInstalledLocation $ installationTimeStamp $ 
nsExpirationDate $ nsBuildSecurity $ nsLdapSchemaVersion $ nsServerMigrationClassname $ nsServerCreationClassname ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsResourceRef-oid NAME 'nsResourceRef' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY seeAlso X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTask-oid NAME 'nsTask' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsTaskLabel $ nsHelpref $ nsExecref $ nsLogSuppress ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTaskGroup-oid NAME 'nsTaskGroup' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY nsTaskLabel X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminObject-oid NAME 'nsAdminObject' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsJarFilename $ nsClassName ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsConfig-oid NAME 'nsConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ nsServerPort $ nsServerAddress $ nsSuiteSpotUser $ nsErrorLog $ nsPidLog $ nsAccessLog $ nsDefaultAcceptLanguage $ nsServerSecurity ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsDirectoryInfo-oid NAME 'nsDirectoryInfo' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsBindDN $ nsBindPassword $ nsDirectoryURL $ nsDirectoryFailoverList $ nsDirectoryInfoRef ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminServer-oid NAME 'nsAdminServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsServerID ) MAY description X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminConfig-oid NAME 'nsAdminConfig' DESC 'Netscape defined objectclass' SUP nsConfig STRUCTURAL MAY ( nsAdminCgiWaitPid $ nsAdminUsers $ nsAdminAccessHosts $ nsAdminAccessAddresses $ nsAdminOneACLDir $ nsAdminEnableDSGW $ nsAdminEnableEnduser $ nsAdminCacheLifetime ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminResourceEditorExtension-oid NAME 'nsAdminResourceEditorExtension' DESC 'Netscape defined objectclass' SUP nsAdminObject STRUCTURAL MAY ( nsAdminAccountInfo $ nsDeleteclassname ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminGlobalParameters-oid NAME 'nsAdminGlobalParameters' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsAdminEndUserHTMLIndex $ nsNickname ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsGlobalParameters-oid NAME 'nsGlobalParameters' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsUniqueAttribute $ nsUserIDFormat $ nsUserRDNComponent $ nsGroupRDNComponent $ nsWellKnownJarFiles $ nsNYR ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsDefaultObjectClasses-oid NAME 'nsDefaultObjectClasses' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY nsDefaultObjectClass X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminConsoleUser-oid NAME 'nsAdminConsoleUser' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY nsPreference X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsCustomView-oid NAME 'nsCustomView' DESC 'Netscape defined objectclass' SUP nsAdminObject STRUCTURAL MAY nsDisplayName X-ORIGIN 'Netscape 
Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTopologyCustomView-oid NAME 'nsTopologyCustomView' DESC 'Netscape defined objectclass' SUP nsCustomView STRUCTURAL MAY nsViewConfiguration X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTopologyPlugin-oid NAME 'nsTopologyPlugin' DESC 'Netscape defined objectclass' SUP nsAdminObject STRUCTURAL X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.18 NAME 'netscapeCertificateServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL X-ORIGIN 'Netscape Certificate Management System' ) INFO:tests.tickets.ticket47973_test:OC: ( nsCertificateServer-oid NAME 'nsCertificateServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST nsServerID MAY ( serverHostName $ nsServerPort $ nsCertConfig ) X-ORIGIN 'Netscape Certificate Management System' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.23 NAME 'netscapeDirectoryServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( nsDirectoryServer-oid NAME 'nsDirectoryServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST nsServerID MAY ( serverHostName $ nsServerPort $ nsSecureServerPort $ nsBindPassword $ nsBindDN $ nsBaseDN ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.8 NAME 'ntUser' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ntUserDomainId MAY ( description $ l $ ou $ seeAlso $ ntUserPriv $ ntUserHomeDir $ ntUserComment $ ntUserFlags $ ntUserScriptPath $ ntUserAuthFlags $ ntUserUsrComment $ ntUserParms $ ntUserWorkstations $ ntUserLastLogon $ ntUserLastLogoff $ ntUserAcctExpires $ ntUserMaxStorage $ ntUserUnitsPerWeek $ ntUserLogonHours $ ntUserBadPwCount $ ntUserNumLogons $ ntUserLogonServer $ ntUserCountryCode $ ntUserCodePage $ ntUserUniqueId $ ntUserPrimaryGroupId $ ntUserProfile $ ntUserHomeDirDrive $ ntUserPasswordExpired $ ntUserCreateNewAccount $ ntUserDeleteAccount $ ntUniqueId $ ntUserNtPassword ) X-ORIGIN 'Netscape NT Synchronization' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.9 NAME 'ntGroup' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ntUserDomainId MAY ( description $ l $ ou $ seeAlso $ ntGroupId $ ntGroupAttributes $ ntGroupCreateNewGroup $ ntGroupDeleteGroup $ ntGroupType $ ntUniqueId $ mail ) X-ORIGIN 'Netscape NT Synchronization' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.82 NAME 'nsChangelog4Config' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY cn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.114 NAME 'nsConsumer4Config' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY cn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.36 NAME 'LDAPReplica' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ l $ ou $ seeAlso $ replicaRoot $ replicaHost $ replicaPort $ replicaBindDn $ replicaCredentials $ replicaBindMethod $ replicaUseSSL $ replicaUpdateSchedule $ replicaUpdateReplayed $ replicaUpdateFailedAt $ replicaBeginORC $ replicaNickname $ replicaEntryFilter $ replicatedAttributeList $ replicaCFUpdated $ replicaAbandonedChanges $ replicaLastRelevantChange ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 
2.16.840.1.113730.3.2.11 NAME 'cirReplicaSource' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( cirReplicaRoot $ cirHost $ cirPort $ cirBindDN $ cirUsePersistentSearch $ cirUseSSL $ cirBindCredentials $ cirLastUpdateApplied $ cirUpdateSchedule $ cirSyncInterval $ cirUpdateFailedAt $ cirBeginORC $ replicaNickname $ replicaEntryFilter $ replicatedAttributeList ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.3 NAME 'mailRecipient' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MAY ( cn $ mail $ mailAlternateAddress $ mailHost $ mailRoutingAddress $ mailAccessDomain $ mailAutoReplyMode $ mailAutoReplyText $ mailDeliveryOption $ mailForwardingAddress $ mailMessageStore $ mailProgramDeliveryInfo $ mailQuota $ multiLineDescription $ uid $ userPassword ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.113730.3.2.37 NAME 'nsMessagingServerUser' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MAY ( cn $ mailAccessDomain $ mailAutoReplyMode $ mailAutoReplyText $ mailDeliveryOption $ mailForwardingAddress $ mailMessageStore $ mailProgramDeliveryInfo $ mailQuota $ nsmsgDisallowAccess $ nsmsgNumMsgQuota $ nswmExtendedUserPrefs $ vacationstartdate $ vacationenddate ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.4 NAME 'mailGroup' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MAY ( cn $ mail $ mailAlternateAddress $ mailHost $ mailRoutingAddress $ mgrpAddHeader $ mgrpAllowedBroadcaster $ mgrpAllowedDomain $ mgrpApprovePassword $ mgrpBroadcasterPolicy $ mgrpDeliverTo $ mgrpErrorsTo $ mgrpModerator $ mgrpMsgMaxSize $ mgrpMsgRejectAction $ mgrpMsgRejectText $ mgrpNoDuplicateChecks $ mgrpRemoveHeader $ mgrpRFC822MailMember $ owner ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.5 NAME 'groupOfMailEnhancedUniqueNames' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MUST cn MAY ( businessCategory $ description $ mailEnhancedUniqueMember $ o $ ou $ owner $ seeAlso ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.24 NAME 'netscapeMailServer' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.45 NAME 'nsValueItem' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsValueCIS $ nsValueCES $ nsValueTel $ nsValueInt $ nsValueBin $ nsValueDN $ nsValueType $ nsValueSyntax $ nsValueDescription $ nsValueHelpURL $ nsValueFlags $ nsValueDefault ) X-ORIGIN 'Netscape servers - value item' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.29 NAME 'netscapeWebServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsServerID ) MAY ( description $ nsServerPort ) X-ORIGIN 'Netscape Web Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.154 NAME 'netscapeReversiblePasswordObject' DESC 'object that contains an netscapeReversiblePassword' AUXILIARY MAY netscapeReversiblePassword X-ORIGIN 'Netscape Web Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.11.1.3.2.2.1 NAME 'accountPolicy' DESC 'Account policy entry' SUP top AUXILIARY MAY accountInactivityLimit X-ORIGIN 'Account Policy Plugin' ) 
INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'An entry in an automounter map' SUP top STRUCTURAL MUST ( cn $ automountInformation ) MAY description X-ORIGIN 'draft-howard-rfc2307bis' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.16 NAME 'automountMap' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.5923.1.1.2 NAME 'eduPerson' AUXILIARY MAY ( eduPersonAffiliation $ eduPersonNickName $ eduPersonOrgDN $ eduPersonOrgUnitDN $ eduPersonPrimaryAffiliation $ eduPersonPrincipalName $ eduPersonEntitlement $ eduPersonPrimaryOrgUnitDN $ eduPersonScopedAffiliation ) X-ORIGIN 'http://middleware.internet2.edu/eduperson/' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.13769.9.1 NAME 'mozillaAbPersonAlpha' SUP top AUXILIARY MUST cn MAY ( c $ description $ displayName $ facsimileTelephoneNumber $ givenName $ homePhone $ l $ mail $ mobile $ mozillaCustom1 $ mozillaCustom2 $ mozillaCustom3 $ mozillaCustom4 $ mozillaHomeCountryName $ mozillaHomeLocalityName $ mozillaHomePostalCode $ mozillaHomeState $ mozillaHomeStreet $ mozillaHomeStreet2 $ mozillaHomeUrl $ mozillaNickname $ mozillaSecondEmail $ mozillaUseHtmlMail $ mozillaWorkStreet2 $ mozillaWorkUrl $ nsAIMid $ o $ ou $ pager $ postalCode $ postOfficeBox $ sn $ st $ street $ telephoneNumber $ title ) X-ORIGIN 'Mozilla Address Book' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.5322.17.1.1 NAME 'authorizedServiceObject' DESC 'Auxiliary object class for adding authorizedService attribute' SUP top AUXILIARY MAY authorizedService X-ORIGIN 'NSS LDAP schema' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.5322.17.1.2 NAME 'hostObject' DESC 'Auxiliary object class for adding host attribute' SUP top AUXILIARY MAY host X-ORIGIN 'NSS LDAP schema' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.318 NAME 'pamConfig' DESC 'PAM plugin configuration' SUP top AUXILIARY MAY ( cn $ pamMissingSuffix $ pamExcludeSuffix $ pamIncludeSuffix $ pamIDAttr $ pamIDMapMethod $ pamFallback $ pamSecure $ pamService $ pamFilter ) X-ORIGIN 'Red Hat Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.326 NAME 'dynamicGroup' DESC 'Group containing internal dynamically-generated members' SUP posixGroup AUXILIARY MAY dsOnlyMemberUid X-ORIGIN 'Red Hat Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.6981.11.2.3 NAME 'PureFTPdUser' DESC 'PureFTPd user with optional quota, throttling and ratio' STRUCTURAL MAY ( FTPStatus $ FTPQuotaFiles $ FTPQuotaMBytes $ FTPUploadRatio $ FTPDownloadRatio $ FTPUploadBandwidth $ FTPDownloadBandwidth $ FTPuid $ FTPgid ) X-ORIGIN 'Pure-FTPd' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.2.840.113556.1.5.87 NAME 'calEntry' DESC 'RFC2739: Calendar Entry' SUP top AUXILIARY MAY ( calCalURI $ calFBURL $ calOtherCalURIs $ calOtherFBURLs $ calCAPURI $ calOtherCAPURIs ) X-ORIGIN 'rfc2739' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.258 NAME 'printerAbstract' DESC 'Printer related information.' 
SUP top ABSTRACT MAY ( printer-name $ printer-natural-language-configured $ printer-location $ printer-info $ printer-more-info $ printer-make-and-model $ printer-multiple-document-jobs-supported $ printer-charset-configured $ printer-charset-supported $ printer-generated-natural-language-supported $ printer-document-format-supported $ printer-color-supported $ printer-compression-supported $ printer-pages-per-minute $ printer-pages-per-minute-color $ printer-finishings-supported $ printer-number-up-supported $ printer-sides-supported $ printer-media-supported $ printer-media-local-supported $ printer-resolution-supported $ printer-print-quality-supported $ printer-job-priority-supported $ printer-copies-supported $ printer-job-k-octets-supported $ printer-current-operator $ printer-service-person $ printer-delivery-orientation-supported $ printer-stacking-order-supported $ printer-output-features-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.255 NAME 'printerService' DESC 'Printer information.' SUP printerAbstract STRUCTURAL MAY ( printer-uri $ printer-xri-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.257 NAME 'printerServiceAuxClass' DESC 'Printer information.' SUP printerAbstract AUXILIARY MAY ( printer-uri $ printer-xri-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.256 NAME 'printerIPP' DESC 'Internet Printing Protocol (IPP) information.' SUP top AUXILIARY MAY ( printer-ipp-versions-supported $ printer-multiple-document-jobs-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.253 NAME 'printerLPR' DESC 'LPR information.' SUP top AUXILIARY MUST printer-name MAY printer-aliases X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.2312.4.3.4.1 NAME 'sabayonProfile' DESC 'sabayon profile' SUP top STRUCTURAL MUST cn MAY ( sabayonProfileURL $ description ) X-ORIGIN 'Sabayon' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.2312.4.3.4.2 NAME 'sabayonProfileNameObject' DESC 'contains sabayon profile name' SUP top AUXILIARY MUST sabayonProfileName X-ORIGIN 'Sabayon' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.2312.4.3.4.3 NAME 'sabayonProfileURLObject' DESC 'contains sabayon profile' SUP top AUXILIARY MUST cn MAY sabayonProfileURL X-ORIGIN 'Sabayon' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' DESC 'Sudoer Entries' SUP top STRUCTURAL MUST cn MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoNotBefore $ sudoNotAfter $ sudoOrder $ description ) X-ORIGIN 'SUDO' ) INFO:tests.tickets.ticket47973_test:OC: ( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY accessTo X-ORIGIN 'nss_ldap/pam_ldap' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' SUP top STRUCTURAL MUST cn MAY MoZiLLaaTTRiBuTe X-ORIGIN 'user defined' ) INFO:tests.tickets.ticket47973_test:case 1: MoZiLLaaTTRiBuTe is in the objectclasses list -- PASS INFO:tests.tickets.ticket47973_test:case 2 - Duplicated schema except cases are not loaded. 
INFO:lib389:Schema Reload task (task-03152017_043223) completed successfully INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.0 NAME 'top' ABSTRACT MUST objectClass X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.1 NAME 'alias' SUP top STRUCTURAL MUST aliasedObjectName X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.20.1 NAME 'subschema' AUXILIARY MAY ( dITStructureRules $ nameForms $ ditContentRules $ objectClasses $ attributeTypes $ matchingRules $ matchingRuleUse ) X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.101.120.111 NAME 'extensibleObject' SUP top AUXILIARY X-ORIGIN 'RFC 4512' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.11 NAME 'applicationProcess' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.2 NAME 'country' SUP top STRUCTURAL MUST c MAY ( searchGuide $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.344 NAME 'dcObject' SUP top AUXILIARY MUST dc X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.14 NAME 'device' SUP top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.9 NAME 'groupOfNames' SUP top STRUCTURAL MUST cn MAY ( member $ businessCategory $ seeAlso $ owner $ ou $ o $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.17 NAME 'groupOfUniqueNames' SUP top STRUCTURAL MUST cn MAY ( uniqueMember $ businessCategory $ seeAlso $ owner $ ou $ o $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.3 NAME 'locality' SUP top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.4 NAME 'organization' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationalISDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.6 NAME 'person' SUP top STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.7 NAME 'organizationalPerson' SUP person STRUCTURAL MAY ( title $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ internationalISDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ ou $ st $ l ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.8 NAME 'organizationalRole' SUP top STRUCTURAL MUST cn MAY ( x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationalISDNNumber $ facsimileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ ou $ st $ l $ description ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.5 NAME 'organizationalUnit' SUP top STRUCTURAL MUST ou MAY ( businessCategory 
$ description $ destinationIndicator $ facsimileTelephoneNumber $ internationalISDNNumber $ l $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOfficeBox $ preferredDeliveryMethod $ registeredAddress $ searchGuide $ seeAlso $ st $ street $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ userPassword $ x121Address ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.10 NAME 'residentialPerson' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ internationalISDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l ) X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.3.1 NAME 'uidObject' SUP top AUXILIARY MUST uid X-ORIGIN 'RFC 4519' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113719.2.142.6.1.1 NAME 'ldapSubEntry' DESC 'LDAP Subentry class, version 1' SUP top STRUCTURAL MAY cn X-ORIGIN 'LDAP Subentry Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.40 NAME 'directoryServerFeature' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( oid $ cn $ multiLineDescription ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.41 NAME 'nsslapdPlugin' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsslapd-pluginPath $ nsslapd-pluginInitFunc $ nsslapd-pluginType $ nsslapd-pluginId $ nsslapd-pluginVersion $ nsslapd-pluginVendor $ nsslapd-pluginDescription $ nsslapd-pluginEnabled ) MAY ( nsslapd-pluginConfigArea $ nsslapd-plugin-depends-on-type ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsSystemIndex ) MAY ( description $ nsIndexType $ nsMatchingRule $ nsIndexIDListScanLimit ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.109 NAME 'nsBackendInstance' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST CN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST CN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST CN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.108 NAME 'nsDS5Replica' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( nsDS5ReplicaRoot $ nsDS5ReplicaId ) MAY ( cn $ nsds5ReplicaPreciseTombstonePurging $ nsds5ReplicaCleanRUV $ nsds5ReplicaAbortCleanRUV $ nsDS5ReplicaType $ nsDS5ReplicaBindDN $ nsState $ nsDS5ReplicaName $ nsDS5Flags $ nsDS5Task $ nsDS5ReplicaReferral $ nsDS5ReplicaAutoReferral $ nsds5ReplicaPurgeDelay $ nsds5ReplicaTombstonePurgeInterval $ nsds5ReplicaChangeCount $ nsds5ReplicaLegacyConsumer $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaBackoffMin $ nsds5ReplicaBackoffMax $ nsds5ReplicaReleaseTimeout ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.113 NAME 'nsTombstone' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( nstombstonecsn $ nsParentUniqueId $ nscpEntryDN 
) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.103 NAME 'nsDS5ReplicationAgreement' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsds5ReplicaCleanRUVNotified $ nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicatedAttributeListTotal $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5ReplicaEnabled $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5ReplicaStripAttrs $ nsds5replicaSessionPauseTime $ nsds5ReplicaProtocolTimeout $ nsds5ReplicaFlowControlWindow $ nsds5ReplicaFlowControlPause $ nsDS5ReplicaWaitForAsyncResults $ nsds5ReplicaIgnoreMissingChange ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.39 NAME 'nsslapdConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY cn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.317 NAME 'nsSaslMapping' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsSaslMapRegexString $ nsSaslMapBaseDNTemplate $ nsSaslMapFilterTemplate ) MAY nsSaslMapPriority X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.43 NAME 'nsSNMP' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsSNMPEnabled ) MAY ( nsSNMPOrganization $ nsSNMPLocation $ nsSNMPContact $ nsSNMPDescription $ nsSNMPName $ nsSNMPMasterHost $ nsSNMPMasterPort ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( nsEncryptionConfig-oid NAME 'nsEncryptionConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsCertfile $ nsKeyfile $ nsSSL2 $ nsSSL3 $ nsTLS1 $ nsTLS10 $ nsTLS11 $ nsTLS12 $ sslVersionMin $ sslVersionMax $ nsSSLSessionTimeout $ nsSSL3SessionTimeout $ nsSSLClientAuth $ nsSSL2Ciphers $ nsSSL3Ciphers $ nsSSLSupportedCiphers $ allowWeakCipher $ CACertExtractFile $ allowWeakDHParam ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsEncryptionModule-oid NAME 'nsEncryptionModule' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsSSLToken $ nsSSLPersonalityssl $ nsSSLActivation $ ServerKeyExtractFile $ ServerCertExtractFile ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.327 NAME 'rootDNPluginConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( rootdn-open-time $ rootdn-close-time $ rootdn-days-allowed $ rootdn-allow-host $ rootdn-deny-host $ rootdn-allow-ip $ rootdn-deny-ip ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.328 NAME 'nsSchemaPolicy' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( cn $ schemaUpdateObjectclassAccept $ schemaUpdateObjectclassReject $ schemaUpdateAttributeAccept $ schemaUpdateAttributeReject ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.31 NAME 'groupOfCertificates' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( 
memberCertificateDescription $ businessCategory $ description $ o $ ou $ owner $ seeAlso ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.33 NAME 'groupOfURLs' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( memberURL $ businessCategory $ description $ o $ ou $ owner $ seeAlso ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.35 NAME 'LDAPServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ l $ ou $ seeAlso $ generation $ changeLogMaximumAge $ changeLogMaximumSize ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.250.3.18 NAME 'cacheObject' DESC 'object that contains the TTL (time to live) attribute type' SUP top STRUCTURAL MAY ttl X-ORIGIN 'LDAP Caching Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.10 NAME 'netscapeServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ serverRoot $ serverProductName $ serverVersionNumber $ installationTimeStamp $ administratorContactInfo $ userpassword $ adminURL $ serverHostName ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.7 NAME 'nsLicenseUser' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( nsLicensedFor $ nsLicenseStartTime $ nsLicenseEndTime ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.1 NAME 'changeLogEntry' DESC 'LDAP changelog objectclass' SUP top STRUCTURAL MUST ( targetdn $ changeTime $ changenumber $ changeType ) MAY ( changes $ newrdn $ deleteoldrdn $ newsuperior ) X-ORIGIN 'Changelog Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.6 NAME 'referral' DESC 'LDAP referrals objectclass' SUP top STRUCTURAL MAY ref X-ORIGIN 'LDAPv3 referrals Internet Draft' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.12 NAME 'passwordObject' DESC 'Netscape defined password policy objectclass' SUP top STRUCTURAL MAY ( pwdpolicysubentry $ passwordExpirationTime $ passwordExpWarned $ passwordRetryCount $ retryCountResetTime $ accountUnlockTime $ passwordHistory $ passwordAllowChangeTime $ passwordGraceUserTime ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.13 NAME 'passwordPolicy' DESC 'Netscape defined password policy objectclass' SUP top STRUCTURAL MAY ( passwordMaxAge $ passwordExp $ passwordMinLength $ passwordKeepHistory $ passwordInHistory $ passwordChange $ passwordWarning $ passwordLockout $ passwordMaxFailure $ passwordResetDuration $ passwordUnlock $ passwordLockoutDuration $ passwordCheckSyntax $ passwordMustChange $ passwordStorageScheme $ passwordMinAge $ passwordResetFailureCount $ passwordGraceLimit $ passwordMinDigits $ passwordMinAlphas $ passwordMinUppers $ passwordMinLowers $ passwordMinSpecials $ passwordMin8bit $ passwordMaxRepeats $ passwordMinCategories $ passwordMinTokenLength $ passwordTrackUpdateTime $ passwordAdminDN ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.30 NAME 'glue' DESC 'Netscape defined objectclass' SUP top STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.32 NAME 'netscapeMachineData' DESC 'Netscape defined objectclass' SUP top STRUCTURAL X-ORIGIN 'Netscape Directory 
Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.38 NAME 'vlvSearch' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ vlvBase $ vlvScope $ vlvFilter ) MAY multiLineDescription X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.42 NAME 'vlvIndex' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ vlvSort ) MAY ( vlvEnabled $ vlvUses ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.84 NAME 'cosDefinition' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( costargettree $ costemplatedn $ cosspecifier $ cosattribute $ aci $ cn $ uid ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.93 NAME 'nsRoleDefinition' DESC 'Netscape defined objectclass' SUP ldapSubEntry STRUCTURAL MAY ( description $ nsRoleScopeDN ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.94 NAME 'nsSimpleRoleDefinition' DESC 'Netscape defined objectclass' SUP nsRoleDefinition STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.95 NAME 'nsComplexRoleDefinition' DESC 'Netscape defined objectclass' SUP nsRoleDefinition STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.96 NAME 'nsManagedRoleDefinition' DESC 'Netscape defined objectclass' SUP nsSimpleRoleDefinition STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.97 NAME 'nsFilteredRoleDefinition' DESC 'Netscape defined objectclass' SUP nsComplexRoleDefinition STRUCTURAL MUST nsRoleFilter X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.98 NAME 'nsNestedRoleDefinition' DESC 'Netscape defined objectclass' SUP nsComplexRoleDefinition STRUCTURAL MUST nsRoleDN X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.99 NAME 'cosSuperDefinition' DESC 'Netscape defined objectclass' SUP ldapSubEntry STRUCTURAL MUST cosattribute MAY description X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.100 NAME 'cosClassicDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition STRUCTURAL MAY ( cosTemplateDn $ cosspecifier ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.101 NAME 'cosPointerDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition STRUCTURAL MAY cosTemplateDn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.102 NAME 'cosIndirectDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition STRUCTURAL MAY cosIndirectSpecifier X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ 
nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5replicaSessionPauseTime $ nsds7WindowsReplicaSubtree $ nsds7DirectoryReplicaSubtree $ nsds7NewWinUserSyncEnabled $ nsds7NewWinGroupSyncEnabled $ nsds7WindowsDomain $ nsds7DirsyncCookie $ winSyncInterval $ oneWaySync $ winSyncMoveAction $ nsds5ReplicaEnabled $ winSyncDirectoryFilter $ winSyncWindowsFilter $ winSyncSubtreePair ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.21 NAME 'pkiUser' DESC 'X.509 PKI User' SUP top AUXILIARY MAY userCertificate X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.22 NAME 'pkiCA' DESC 'X.509 PKI Certificate Authority' SUP top AUXILIARY MAY ( cACertificate $ certificateRevocationList $ authorityRevocationList $ crossCertificatePair ) X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.19 NAME 'cRLDistributionPoint' DESC 'X.509 CRL distribution point' SUP top STRUCTURAL MUST cn MAY ( certificateRevocationList $ authorityRevocationList $ deltaRevocationList ) X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.23 NAME 'deltaCRL' DESC 'X.509 delta CRL' SUP top AUXILIARY MAY deltaRevocationList X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'X.521 strong authentication user' SUP top AUXILIARY MUST userCertificate X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.18 NAME 'userSecurityInformation' DESC 'X.521 user security information' SUP top AUXILIARY MAY supportedAlgorithms X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.16 NAME 'certificationAuthority' DESC 'X.509 certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.5.6.16.2 NAME 'certificationAuthority-V2' DESC 'X.509 certificate authority, version 2' SUP certificationAuthority AUXILIARY MAY deltaRevocationList X-ORIGIN 'RFC 4523' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRUCTURAL MUST uid MAY ( description $ seeAlso $ l $ o $ ou $ host ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STRUCTURAL MUST documentIdentifier MAY ( cn $ description $ seeAlso $ l $ o $ ou $ documentTitle $ documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP top STRUCTURAL MUST cn MAY ( description $ l $ o $ ou $ seeAlso $ telephonenumber ) X-ORIGIN 'RFC 4524' ) 
INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRUCTURAL MUST dc MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description $ o $ associatedName ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' SUP top AUXILIARY MUST associatedDomain X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP country STRUCTURAL MUST co X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.14 NAME 'rFC822localPart' SUP domain STRUCTURAL MAY ( cn $ sn ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTURAL MUST cn MAY ( roomNumber $ description $ seeAlso $ telephoneNumber ) X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObject' SUP top AUXILIARY MUST userPassword X-ORIGIN 'RFC 4524' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' SUP organizationalPerson STRUCTURAL MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayName $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddress $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ pager $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIdentifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) X-ORIGIN 'RFC 2798' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.322 NAME 'autoMemberDefinition' DESC 'Auto Membership Config Definition Entry' SUP top STRUCTURAL MUST ( cn $ autoMemberScope $ autoMemberFilter $ autoMemberGroupingAttr ) MAY ( autoMemberDefaultGroup $ autoMemberDisabled ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.323 NAME 'autoMemberRegexRule' DESC 'Auto Membership Regex Rule Entry' SUP top STRUCTURAL MUST ( cn $ autoMemberTargetGroup ) MAY ( autoMemberExclusiveRegex $ autoMemberInclusiveRegex $ description ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.324 NAME 'dnaPluginConfig' DESC 'DNA plugin configuration' SUP top AUXILIARY MAY ( dnaType $ dnaPrefix $ dnaNextValue $ dnaMaxValue $ dnaInterval $ dnaMagicRegen $ dnaFilter $ dnaScope $ dnaExcludeScope $ dnaSharedCfgDN $ dnaThreshold $ dnaNextRange $ dnaRangeRequestTimeout $ dnaRemoteBindDN $ dnaRemoteBindCred $ cn ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.325 NAME 'dnaSharedConfig' DESC 'DNA Shared Configuration' SUP top AUXILIARY MAY ( dnaHostname $ dnaPortNum $ dnaSecurePortNum $ dnaRemoteBindMethod $ dnaRemoteConnProtocol $ dnaRemainingValues ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.319 NAME 'mepManagedEntry' DESC 'Managed Entries Managed Entry' SUP top AUXILIARY MAY mepManagedBy X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.320 NAME 'mepOriginEntry' DESC 'Managed Entries Origin Entry' SUP top AUXILIARY MAY mepManagedEntry X-ORIGIN '389 Directory Server' 
) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.321 NAME 'mepTemplateEntry' DESC 'Managed Entries Template Entry' SUP top AUXILIARY MAY ( cn $ mepStaticAttr $ mepMappedAttr $ mepRDNAttr ) X-ORIGIN '389 Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST ( cn $ uid $ uidNumber $ gidNumber $ homeDirectory ) MAY ( userPassword $ loginShell $ gecos $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST uid MAY ( userPassword $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarning $ shadowInactive $ shadowExpire $ shadowFlag $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ gidNumber ) MAY ( userPassword $ memberUid $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ ipServicePort $ ipServiceProtocol ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST ( ipHostNumber $ cn ) MAY ( manager $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( ipNetworkNumber $ cn ) MAY ( ipNetmaskNumber $ manager $ l $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST cn MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST cn MAY ( macAddress $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST cn MAY ( bootFile $ bootParameter $ description $ l $ o $ ou $ owner $ seeAlso $ serialNumber ) X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.13 NAME 'nisMap' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MUST nisMapName MAY description X-ORIGIN 'RFC 2307' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.129 NAME 'inetDomain' DESC 'Auxiliary class for virtual domain nodes' SUP top AUXILIARY MAY ( inetDomainBaseDN $ inetDomainStatus ) X-ORIGIN 'Netscape subscriber interoperability' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.130 NAME 'inetUser' DESC 'Auxiliary class 
which must be present in an entry for delivery of subscriber services' SUP top AUXILIARY MAY ( uid $ inetUserStatus $ inetUserHTTPURL $ userPassword $ memberOf ) X-ORIGIN 'Netscape subscriber interoperability' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.101.120.141 NAME 'NetscapeLinkedOrganization' AUXILIARY MAY parentOrganization X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.1466.101.120.142 NAME 'NetscapePreferences' AUXILIARY MAY ( preferredLanguage $ preferredLocale $ preferredTimeZone ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.134 NAME 'inetSubscriber' SUP top AUXILIARY MAY ( inetSubscriberAccountId $ inetSubscriberChallenge $ inetSubscriberResponse ) X-ORIGIN 'Netscape subscriber interoperability' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.112 NAME 'inetAdmin' DESC 'Marker for an administrative group or user' SUP top AUXILIARY MAY ( aci $ memberof $ adminrole ) X-ORIGIN 'Netscape Delegated Administrator' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.1 NAME 'javaContainer' DESC 'Container for a Java object' SUP top STRUCTURAL MUST cn X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.4 NAME 'javaObject' DESC 'Java object representation' SUP top ABSTRACT MUST javaClassName MAY ( javaClassNames $ javaCodebase $ javaDoc $ description ) X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.5 NAME 'javaSerializedObject' DESC 'Java serialized object' SUP javaObject AUXILIARY MUST javaSerializedData X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.7 NAME 'javaNamingReference' DESC 'JNDI reference' SUP javaObject AUXILIARY MAY ( javaReferenceAddress $ javaFactory ) X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.42.2.27.4.2.8 NAME 'javaMarshalledObject' DESC 'Java marshalled object' SUP javaObject AUXILIARY MUST javaSerializedData X-ORIGIN 'RFC 2713' ) INFO:tests.tickets.ticket47973_test:OC: ( 0.9.2342.19200300.100.4.3 NAME 'pilotObject' DESC 'Standard LDAP objectclass' SUP top STRUCTURAL MAY ( audio $ dITRedirect $ info $ jpegPhoto $ lastModifiedBy $ lastModifiedTime $ manager $ photo $ uniqueIdentifier ) X-ORIGIN 'RFC 1274' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminDomain-oid NAME 'nsAdminDomain' DESC 'Netscape defined objectclass' SUP organizationalUnit STRUCTURAL MAY nsAdminDomainName X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsHost-oid NAME 'nsHost' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( serverHostName $ description $ l $ nsHostLocation $ nsHardwarePlatform $ nsOsVersion ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminGroup-oid NAME 'nsAdminGroup' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsAdminGroupName $ description $ nsConfigRoot $ nsAdminSIEDN ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsApplication-oid NAME 'nsApplication' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsVendor $ description $ nsProductName $ nsNickName $ nsProductVersion $ nsBuildNumber $ nsRevisionNumber $ nsSerialNumber $ nsInstalledLocation $ installationTimeStamp $ nsExpirationDate $ nsBuildSecurity $ nsLdapSchemaVersion $ nsServerMigrationClassname $ nsServerCreationClassname ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsResourceRef-oid NAME 'nsResourceRef' DESC 
'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY seeAlso X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTask-oid NAME 'nsTask' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsTaskLabel $ nsHelpref $ nsExecref $ nsLogSuppress ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTaskGroup-oid NAME 'nsTaskGroup' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY nsTaskLabel X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminObject-oid NAME 'nsAdminObject' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsJarFilename $ nsClassName ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsConfig-oid NAME 'nsConfig' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ nsServerPort $ nsServerAddress $ nsSuiteSpotUser $ nsErrorLog $ nsPidLog $ nsAccessLog $ nsDefaultAcceptLanguage $ nsServerSecurity ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsDirectoryInfo-oid NAME 'nsDirectoryInfo' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsBindDN $ nsBindPassword $ nsDirectoryURL $ nsDirectoryFailoverList $ nsDirectoryInfoRef ) X-ORIGIN 'Netscape' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminServer-oid NAME 'nsAdminServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsServerID ) MAY description X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminConfig-oid NAME 'nsAdminConfig' DESC 'Netscape defined objectclass' SUP nsConfig STRUCTURAL MAY ( nsAdminCgiWaitPid $ nsAdminUsers $ nsAdminAccessHosts $ nsAdminAccessAddresses $ nsAdminOneACLDir $ nsAdminEnableDSGW $ nsAdminEnableEnduser $ nsAdminCacheLifetime ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminResourceEditorExtension-oid NAME 'nsAdminResourceEditorExtension' DESC 'Netscape defined objectclass' SUP nsAdminObject STRUCTURAL MAY ( nsAdminAccountInfo $ nsDeleteclassname ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminGlobalParameters-oid NAME 'nsAdminGlobalParameters' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsAdminEndUserHTMLIndex $ nsNickname ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsGlobalParameters-oid NAME 'nsGlobalParameters' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsUniqueAttribute $ nsUserIDFormat $ nsUserRDNComponent $ nsGroupRDNComponent $ nsWellKnownJarFiles $ nsNYR ) X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsDefaultObjectClasses-oid NAME 'nsDefaultObjectClasses' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY nsDefaultObjectClass X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsAdminConsoleUser-oid NAME 'nsAdminConsoleUser' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY nsPreference X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsCustomView-oid NAME 'nsCustomView' DESC 'Netscape defined objectclass' SUP nsAdminObject STRUCTURAL MAY nsDisplayName X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTopologyCustomView-oid NAME 'nsTopologyCustomView' DESC 'Netscape defined objectclass' SUP nsCustomView STRUCTURAL MAY nsViewConfiguration X-ORIGIN 'Netscape 
Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( nsTopologyPlugin-oid NAME 'nsTopologyPlugin' DESC 'Netscape defined objectclass' SUP nsAdminObject STRUCTURAL X-ORIGIN 'Netscape Administration Services' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.18 NAME 'netscapeCertificateServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL X-ORIGIN 'Netscape Certificate Management System' ) INFO:tests.tickets.ticket47973_test:OC: ( nsCertificateServer-oid NAME 'nsCertificateServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST nsServerID MAY ( serverHostName $ nsServerPort $ nsCertConfig ) X-ORIGIN 'Netscape Certificate Management System' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.23 NAME 'netscapeDirectoryServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( nsDirectoryServer-oid NAME 'nsDirectoryServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST nsServerID MAY ( serverHostName $ nsServerPort $ nsSecureServerPort $ nsBindPassword $ nsBindDN $ nsBaseDN ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.8 NAME 'ntUser' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ntUserDomainId MAY ( description $ l $ ou $ seeAlso $ ntUserPriv $ ntUserHomeDir $ ntUserComment $ ntUserFlags $ ntUserScriptPath $ ntUserAuthFlags $ ntUserUsrComment $ ntUserParms $ ntUserWorkstations $ ntUserLastLogon $ ntUserLastLogoff $ ntUserAcctExpires $ ntUserMaxStorage $ ntUserUnitsPerWeek $ ntUserLogonHours $ ntUserBadPwCount $ ntUserNumLogons $ ntUserLogonServer $ ntUserCountryCode $ ntUserCodePage $ ntUserUniqueId $ ntUserPrimaryGroupId $ ntUserProfile $ ntUserHomeDirDrive $ ntUserPasswordExpired $ ntUserCreateNewAccount $ ntUserDeleteAccount $ ntUniqueId $ ntUserNtPassword ) X-ORIGIN 'Netscape NT Synchronization' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.9 NAME 'ntGroup' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ntUserDomainId MAY ( description $ l $ ou $ seeAlso $ ntGroupId $ ntGroupAttributes $ ntGroupCreateNewGroup $ ntGroupDeleteGroup $ ntGroupType $ ntUniqueId $ mail ) X-ORIGIN 'Netscape NT Synchronization' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.82 NAME 'nsChangelog4Config' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY cn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.114 NAME 'nsConsumer4Config' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MAY cn X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.36 NAME 'LDAPReplica' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( description $ l $ ou $ seeAlso $ replicaRoot $ replicaHost $ replicaPort $ replicaBindDn $ replicaCredentials $ replicaBindMethod $ replicaUseSSL $ replicaUpdateSchedule $ replicaUpdateReplayed $ replicaUpdateFailedAt $ replicaBeginORC $ replicaNickname $ replicaEntryFilter $ replicatedAttributeList $ replicaCFUpdated $ replicaAbandonedChanges $ replicaLastRelevantChange ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.11 NAME 'cirReplicaSource' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( cirReplicaRoot $ cirHost $ cirPort $ cirBindDN $ cirUsePersistentSearch $ cirUseSSL $ cirBindCredentials $ 
cirLastUpdateApplied $ cirUpdateSchedule $ cirSyncInterval $ cirUpdateFailedAt $ cirBeginORC $ replicaNickname $ replicaEntryFilter $ replicatedAttributeList ) X-ORIGIN 'Netscape Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.3 NAME 'mailRecipient' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MAY ( cn $ mail $ mailAlternateAddress $ mailHost $ mailRoutingAddress $ mailAccessDomain $ mailAutoReplyMode $ mailAutoReplyText $ mailDeliveryOption $ mailForwardingAddress $ mailMessageStore $ mailProgramDeliveryInfo $ mailQuota $ multiLineDescription $ uid $ userPassword ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.113730.3.2.37 NAME 'nsMessagingServerUser' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MAY ( cn $ mailAccessDomain $ mailAutoReplyMode $ mailAutoReplyText $ mailDeliveryOption $ mailForwardingAddress $ mailMessageStore $ mailProgramDeliveryInfo $ mailQuota $ nsmsgDisallowAccess $ nsmsgNumMsgQuota $ nswmExtendedUserPrefs $ vacationstartdate $ vacationenddate ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.4 NAME 'mailGroup' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MAY ( cn $ mail $ mailAlternateAddress $ mailHost $ mailRoutingAddress $ mgrpAddHeader $ mgrpAllowedBroadcaster $ mgrpAllowedDomain $ mgrpApprovePassword $ mgrpBroadcasterPolicy $ mgrpDeliverTo $ mgrpErrorsTo $ mgrpModerator $ mgrpMsgMaxSize $ mgrpMsgRejectAction $ mgrpMsgRejectText $ mgrpNoDuplicateChecks $ mgrpRemoveHeader $ mgrpRFC822MailMember $ owner ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.5 NAME 'groupOfMailEnhancedUniqueNames' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY MUST cn MAY ( businessCategory $ description $ mailEnhancedUniqueMember $ o $ ou $ owner $ seeAlso ) X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.24 NAME 'netscapeMailServer' DESC 'Netscape Messaging Server 4.x defined objectclass' SUP top AUXILIARY X-ORIGIN 'Netscape Messaging Server 4.x' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.45 NAME 'nsValueItem' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST cn MAY ( nsValueCIS $ nsValueCES $ nsValueTel $ nsValueInt $ nsValueBin $ nsValueDN $ nsValueType $ nsValueSyntax $ nsValueDescription $ nsValueHelpURL $ nsValueFlags $ nsValueDefault ) X-ORIGIN 'Netscape servers - value item' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.29 NAME 'netscapeWebServer' DESC 'Netscape defined objectclass' SUP top STRUCTURAL MUST ( cn $ nsServerID ) MAY ( description $ nsServerPort ) X-ORIGIN 'Netscape Web Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.154 NAME 'netscapeReversiblePasswordObject' DESC 'object that contains an netscapeReversiblePassword' AUXILIARY MAY netscapeReversiblePassword X-ORIGIN 'Netscape Web Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.11.1.3.2.2.1 NAME 'accountPolicy' DESC 'Account policy entry' SUP top AUXILIARY MAY accountInactivityLimit X-ORIGIN 'Account Policy Plugin' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'An entry in an automounter map' SUP top STRUCTURAL MUST ( cn $ automountInformation ) MAY description X-ORIGIN 'draft-howard-rfc2307bis' ) 
INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.1.1.2.16 NAME 'automountMap' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.5923.1.1.2 NAME 'eduPerson' AUXILIARY MAY ( eduPersonAffiliation $ eduPersonNickName $ eduPersonOrgDN $ eduPersonOrgUnitDN $ eduPersonPrimaryAffiliation $ eduPersonPrincipalName $ eduPersonEntitlement $ eduPersonPrimaryOrgUnitDN $ eduPersonScopedAffiliation ) X-ORIGIN 'http://middleware.internet2.edu/eduperson/' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.13769.9.1 NAME 'mozillaAbPersonAlpha' SUP top AUXILIARY MUST cn MAY ( c $ description $ displayName $ facsimileTelephoneNumber $ givenName $ homePhone $ l $ mail $ mobile $ mozillaCustom1 $ mozillaCustom2 $ mozillaCustom3 $ mozillaCustom4 $ mozillaHomeCountryName $ mozillaHomeLocalityName $ mozillaHomePostalCode $ mozillaHomeState $ mozillaHomeStreet $ mozillaHomeStreet2 $ mozillaHomeUrl $ mozillaNickname $ mozillaSecondEmail $ mozillaUseHtmlMail $ mozillaWorkStreet2 $ mozillaWorkUrl $ nsAIMid $ o $ ou $ pager $ postalCode $ postOfficeBox $ sn $ st $ street $ telephoneNumber $ title ) X-ORIGIN 'Mozilla Address Book' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.5322.17.1.1 NAME 'authorizedServiceObject' DESC 'Auxiliary object class for adding authorizedService attribute' SUP top AUXILIARY MAY authorizedService X-ORIGIN 'NSS LDAP schema' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.5322.17.1.2 NAME 'hostObject' DESC 'Auxiliary object class for adding host attribute' SUP top AUXILIARY MAY host X-ORIGIN 'NSS LDAP schema' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.318 NAME 'pamConfig' DESC 'PAM plugin configuration' SUP top AUXILIARY MAY ( cn $ pamMissingSuffix $ pamExcludeSuffix $ pamIncludeSuffix $ pamIDAttr $ pamIDMapMethod $ pamFallback $ pamSecure $ pamService $ pamFilter ) X-ORIGIN 'Red Hat Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 2.16.840.1.113730.3.2.326 NAME 'dynamicGroup' DESC 'Group containing internal dynamically-generated members' SUP posixGroup AUXILIARY MAY dsOnlyMemberUid X-ORIGIN 'Red Hat Directory Server' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.6981.11.2.3 NAME 'PureFTPdUser' DESC 'PureFTPd user with optional quota, throttling and ratio' STRUCTURAL MAY ( FTPStatus $ FTPQuotaFiles $ FTPQuotaMBytes $ FTPUploadRatio $ FTPDownloadRatio $ FTPUploadBandwidth $ FTPDownloadBandwidth $ FTPuid $ FTPgid ) X-ORIGIN 'Pure-FTPd' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.2.840.113556.1.5.87 NAME 'calEntry' DESC 'RFC2739: Calendar Entry' SUP top AUXILIARY MAY ( calCalURI $ calFBURL $ calOtherCalURIs $ calOtherFBURLs $ calCAPURI $ calOtherCAPURIs ) X-ORIGIN 'rfc2739' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.258 NAME 'printerAbstract' DESC 'Printer related information.' 
SUP top ABSTRACT MAY ( printer-name $ printer-natural-language-configured $ printer-location $ printer-info $ printer-more-info $ printer-make-and-model $ printer-multiple-document-jobs-supported $ printer-charset-configured $ printer-charset-supported $ printer-generated-natural-language-supported $ printer-document-format-supported $ printer-color-supported $ printer-compression-supported $ printer-pages-per-minute $ printer-pages-per-minute-color $ printer-finishings-supported $ printer-number-up-supported $ printer-sides-supported $ printer-media-supported $ printer-media-local-supported $ printer-resolution-supported $ printer-print-quality-supported $ printer-job-priority-supported $ printer-copies-supported $ printer-job-k-octets-supported $ printer-current-operator $ printer-service-person $ printer-delivery-orientation-supported $ printer-stacking-order-supported $ printer-output-features-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.255 NAME 'printerService' DESC 'Printer information.' SUP printerAbstract STRUCTURAL MAY ( printer-uri $ printer-xri-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.257 NAME 'printerServiceAuxClass' DESC 'Printer information.' SUP printerAbstract AUXILIARY MAY ( printer-uri $ printer-xri-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.256 NAME 'printerIPP' DESC 'Internet Printing Protocol (IPP) information.' SUP top AUXILIARY MAY ( printer-ipp-versions-supported $ printer-multiple-document-jobs-supported ) X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.18.0.2.6.253 NAME 'printerLPR' DESC 'LPR information.' SUP top AUXILIARY MUST printer-name MAY printer-aliases X-ORIGIN 'rfc3712' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.2312.4.3.4.1 NAME 'sabayonProfile' DESC 'sabayon profile' SUP top STRUCTURAL MUST cn MAY ( sabayonProfileURL $ description ) X-ORIGIN 'Sabayon' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.2312.4.3.4.2 NAME 'sabayonProfileNameObject' DESC 'contains sabayon profile name' SUP top AUXILIARY MUST sabayonProfileName X-ORIGIN 'Sabayon' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.2312.4.3.4.3 NAME 'sabayonProfileURLObject' DESC 'contains sabayon profile' SUP top AUXILIARY MUST cn MAY sabayonProfileURL X-ORIGIN 'Sabayon' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' DESC 'Sudoer Entries' SUP top STRUCTURAL MUST cn MAY ( sudoUser $ sudoHost $ sudoCommand $ sudoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ sudoNotBefore $ sudoNotAfter $ sudoOrder $ description ) X-ORIGIN 'SUDO' ) INFO:tests.tickets.ticket47973_test:OC: ( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY accessTo X-ORIGIN 'nss_ldap/pam_ldap' ) INFO:tests.tickets.ticket47973_test:OC: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' SUP top STRUCTURAL MUST cn MAY MoZiLLaaTTRiBuTe X-ORIGIN 'user defined' ) INFO:tests.tickets.ticket47973_test:case 2: MOZILLAATTRIBUTE is not in the objectclasses list -- PASS INFO:tests.tickets.ticket47973_test:case 2-1: Use the custom schema with mozillaattribute INFO:tests.tickets.ticket47973_test:case 2-1: mozillaattribute: test user found-- PASS
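Editorial aside: the dump above is the full objectclass list that ticket47973_test reads back after its Schema Reload task. As a minimal sketch (not part of the captured run), the same listing can be reproduced with python-ldap by reading the cn=schema subentry; the connection URL and credentials below are placeholders.

import ldap
from ldap.schema.models import ObjectClass

# Hypothetical connection details -- adjust to the instance under test.
conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

# cn=schema is the subschema subentry where 389 DS publishes its definitions.
dn, attrs = conn.search_s("cn=schema", ldap.SCOPE_BASE,
                          "(objectclass=*)", ["objectClasses"])[0]
for raw in attrs["objectClasses"]:
    if isinstance(raw, bytes):   # python-ldap 3.x returns bytes, 2.x returns str
        raw = raw.decode("utf-8")
    oc = ObjectClass(raw)
    print("OC: %s (%s)" % (oc.names[0], oc.oid))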
Passed tickets/ticket47976_test.py::test_ticket47976_init 2.19
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
Passed tickets/ticket47976_test.py::test_ticket47976_1 1.22
No log output captured.
Passed tickets/ticket47976_test.py::test_ticket47976_2 4.04
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47976_test:Test complete
INFO:tests.tickets.ticket47976_test:Export LDIF file...
INFO:lib389:Export task export_03152017_043235 for file /var/lib/dirsrv/slapd-standalone_1/ldif/export.ldif completed successfully
INFO:tests.tickets.ticket47976_test:Import LDIF file...
INFO:lib389:Import task import_03152017_043237 for file /var/lib/dirsrv/slapd-standalone_1/ldif/export.ldif completed successfully
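The export/import round trip logged above is driven through the lib389 Tasks helpers. A rough sketch of the same two calls follows; it assumes an already opened DirSrv instance named standalone, and the keyword names (output_file/input_file, TASK_WAIT) are assumptions about the lib389 version used for this report.

from lib389.tasks import Tasks
from lib389.properties import TASK_WAIT   # assumption: TASK_WAIT is defined here in this lib389 version

# 'standalone' is assumed to be a started, bound DirSrv instance.
tasks = Tasks(standalone)
ldif = "/var/lib/dirsrv/slapd-standalone_1/ldif/export.ldif"

# Export the suffix to LDIF, then import the same file back; TASK_WAIT
# blocks until the cn=tasks entry reports completion.
tasks.exportLDIF(suffix="dc=example,dc=com", output_file=ldif, args={TASK_WAIT: True})
tasks.importLDIF(suffix="dc=example,dc=com", input_file=ldif, args={TASK_WAIT: True})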
Passed tickets/ticket47976_test.py::test_ticket47976_3 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket47976_test:Testing if the delete will hang or not
INFO:tests.tickets.ticket47976_test:user0 was correctly deleted
INFO:tests.tickets.ticket47976_test:user1 was correctly deleted
Passed tickets/ticket48005_test.py::test_ticket48005_setup 8.85
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48005_test:Ticket 48005 setup...
INFO:tests.tickets.ticket48005_test:No ulimit -c in /etc/sysconfig/dirsrv
INFO:tests.tickets.ticket48005_test:Adding it
INFO:tests.tickets.ticket48005_test:No LimitCORE in /etc/sysconfig/dirsrv.systemd
INFO:tests.tickets.ticket48005_test:Adding it
ls: cannot access /var/lib/dirsrv/slapd-standalone_1/ldif/ticket48005.ldif: No such file or directory
INFO:tests.tickets.ticket48005_test:dbgen_prog: /usr/bin/dbgen.pl
INFO:tests.tickets.ticket48005_test:We have 10106 entries.
INFO:lib389:Import task import_03152017_043303 for file /var/lib/dirsrv/slapd-standalone_1/ldif/ticket48005.ldif completed successfully
INFO:tests.tickets.ticket48005_test:Importing /var/lib/dirsrv/slapd-standalone_1/ldif/ticket48005.ldif complete.
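Before the import, this setup step makes sure core dumps can be produced: the log shows it appending a ulimit line to /etc/sysconfig/dirsrv and a LimitCORE line to /etc/dirsrv's systemd sysconfig when they are missing. A small sketch of that idempotent append; the exact directive strings are assumptions inferred from the log messages.

def ensure_line(path, needle, line):
    """Append 'line' to 'path' if no existing line contains 'needle'."""
    with open(path, "r") as f:
        present = any(needle in l for l in f)
    if not present:
        with open(path, "a") as f:
            f.write(line + "\n")

# Directive values below mirror the "Adding it" messages in the log, but
# the precise strings the test writes are assumptions.
ensure_line("/etc/sysconfig/dirsrv", "ulimit -c", "ulimit -c unlimited")
ensure_line("/etc/sysconfig/dirsrv.systemd", "LimitCORE", "LimitCORE=infinity")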
Passed tickets/ticket48005_test.py::test_ticket48005_memberof 6.05
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48005_test:Ticket 48005 memberof test...
INFO:lib389:fixupMemberOf task fixupmemberof_03152017_043311 for basedn dc=example,dc=com completed successfully
ls: cannot access /var/log/dirsrv/slapd-standalone_1/core*: No such file or directory
INFO:tests.tickets.ticket48005_test:No core files are found
INFO:tests.tickets.ticket48005_test:Ticket 48005 memberof test complete
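Each ticket48005 case follows the same pattern: run a server task, then verify that no core file appeared under the instance log directory (the ls errors above are the expected "no such file" results). The core check can be expressed with a simple glob, for example:

import glob

# Equivalent of the "ls /var/log/dirsrv/slapd-standalone_1/core*" check in
# the log: an empty glob result means no core files were produced.
cores = glob.glob("/var/log/dirsrv/slapd-standalone_1/core*")
assert cores == [], "unexpected core files: %s" % cores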
Passed tickets/ticket48005_test.py::test_ticket48005_automember 12.30
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48005_test:Ticket 48005 automember test...
INFO:tests.tickets.ticket48005_test:Adding automember config
INFO:lib389:Automember Rebuild Membership task(task-03152017_043317) completed successfully
ls: cannot access /var/log/dirsrv/slapd-standalone_1/core*: No such file or directory
INFO:tests.tickets.ticket48005_test:No core files are found
INFO:lib389:Automember Export Updates task (task-03152017_043320) completed successfully
ls: cannot access /var/log/dirsrv/slapd-standalone_1/core*: No such file or directory
INFO:tests.tickets.ticket48005_test:No core files are found
INFO:lib389:Automember Map Updates task (task-03152017_043324) completed successfully
ls: cannot access /var/log/dirsrv/slapd-standalone_1/core*: No such file or directory
INFO:tests.tickets.ticket48005_test:No core files are found
INFO:tests.tickets.ticket48005_test:Ticket 48005 automember test complete
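The Automember Rebuild/Export/Map tasks above, like the Syntax Validate, USN tombstone cleanup and Schema Reload tasks in the following cases, are started the way 389 DS tasks generally are: an entry is added under cn=tasks,cn=config and polled until it finishes; the lib389 calls in the log wrap exactly that. A hedged sketch of the rebuild case with python-ldap follows; the task container DN and the basedn/filter/scope attributes match the documented rebuild task, but the cn and attribute values here are illustrative.

import ldap
import ldap.modlist

# Assumes 'conn' is a python-ldap connection bound as Directory Manager
# (see the earlier schema-reading sketch).
task_dn = "cn=rebuild_example,cn=automember rebuild membership,cn=tasks,cn=config"
entry = {
    "objectClass": [b"top", b"extensibleObject"],
    "cn": [b"rebuild_example"],
    "basedn": [b"dc=example,dc=com"],
    "filter": [b"(objectclass=posixAccount)"],
    "scope": [b"sub"],
}
conn.add_s(task_dn, ldap.modlist.addModlist(entry))
# Completion is reported in the task entry (nsTaskExitCode and friends),
# which the lib389 helpers poll before logging "completed successfully".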
Passed tickets/ticket48005_test.py::test_ticket48005_syntaxvalidate 1.22
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48005_test:Ticket 48005 syntax validate test...
INFO:lib389:Syntax Validate task (task-03152017_043327) completed successfully
ls: cannot access /var/log/dirsrv/slapd-standalone_1/core*: No such file or directory
INFO:tests.tickets.ticket48005_test:No core files are found
INFO:tests.tickets.ticket48005_test:Ticket 48005 syntax validate test complete
Passed tickets/ticket48005_test.py::test_ticket48005_usn 32.73
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48005_test:Ticket 48005 usn test...
INFO:lib389:USN tombstone cleanup task (task-03152017_043353) completed successfully
ls: cannot access /var/log/dirsrv/slapd-standalone_1/core*: No such file or directory
INFO:tests.tickets.ticket48005_test:No core files are found
INFO:tests.tickets.ticket48005_test:Ticket 48005 usn test complete
Passed tickets/ticket48005_test.py::test_ticket48005_schemareload 1.15
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48005_test:Ticket 48005 schema reload test...
INFO:lib389:Schema Reload task (task-03152017_043401) completed successfully
ls: cannot access /var/log/dirsrv/slapd-standalone_1/core*: No such file or directory
INFO:tests.tickets.ticket48005_test:No core files are found
INFO:tests.tickets.ticket48005_test:Ticket 48005 schema reload test complete
Passed tickets/ticket48013_test.py::test_ticket48013 0.09
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48013_test:Testing cookie: #
INFO:tests.tickets.ticket48013_test:Invalid cookie correctly rejected: Invalid session cookie
INFO:tests.tickets.ticket48013_test:Testing cookie: ##
INFO:tests.tickets.ticket48013_test:Invalid cookie correctly rejected: Invalid session cookie
INFO:tests.tickets.ticket48013_test:Testing cookie: a#a#a
INFO:tests.tickets.ticket48013_test:Invalid cookie correctly rejected: Invalid session cookie
INFO:tests.tickets.ticket48013_test:Testing cookie: a#a#1
INFO:tests.tickets.ticket48013_test:Invalid cookie correctly rejected: Invalid session cookie
INFO:tests.tickets.ticket48013_test:Test complete
Passed tickets/ticket48026_test.py::test_ticket48026 2.20
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48026_test:Test complete
Passed tickets/ticket48109_test.py::test_ticket48109 13.31
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48109_test:Test case 0
INFO:tests.tickets.ticket48109_test:match: conn=1 op=2
INFO:tests.tickets.ticket48109_test:l1: [15/Mar/2017:04:34:19.005419111 -0400] conn=1 op=2 RESULT err=0 tag=101 nentries=1 etime=0
INFO:tests.tickets.ticket48109_test:match: nentires=1
INFO:tests.tickets.ticket48109_test:Entry uid=a* found.
INFO:tests.tickets.ticket48109_test:Test case 0 - OK - substr index used
INFO:tests.tickets.ticket48109_test:Test case 1
INFO:tests.tickets.ticket48109_test:match: conn=1 op=2
INFO:tests.tickets.ticket48109_test:l1: [15/Mar/2017:04:34:19.005419111 -0400] conn=1 op=2 RESULT err=0 tag=101 nentries=1 etime=0
INFO:tests.tickets.ticket48109_test:match: nentires=1
INFO:tests.tickets.ticket48109_test:Entry uid=*b found.
INFO:tests.tickets.ticket48109_test:Test case 1 - OK - substr index used
INFO:tests.tickets.ticket48109_test:Test case 2
INFO:tests.tickets.ticket48109_test:match: conn=1 op=2
INFO:tests.tickets.ticket48109_test:l1: [15/Mar/2017:04:34:19.005419111 -0400] conn=1 op=2 RESULT err=0 tag=101 nentries=1 etime=0
INFO:tests.tickets.ticket48109_test:match: nentires=1
INFO:tests.tickets.ticket48109_test:Entry uid=c* found.
INFO:tests.tickets.ticket48109_test:Test case 2-1 - OK - correct substr index used
INFO:tests.tickets.ticket48109_test:match: conn=1 op=3
INFO:tests.tickets.ticket48109_test:l1: [15/Mar/2017:04:34:27.860051192 -0400] conn=1 op=3 RESULT err=0 tag=101 nentries=1 etime=0
INFO:tests.tickets.ticket48109_test:match: nentires=1
INFO:tests.tickets.ticket48109_test:Entry uid=*2 found.
INFO:tests.tickets.ticket48109_test:Test case 2-2 - OK - correct substr index used
INFO:tests.tickets.ticket48109_test:Testcase PASSED
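ticket48109 decides whether the substring index was used by locating the RESULT line for the search operation in the access log and checking that nentries is 1 (the "nentires" spelling above is the test's own log message, reproduced verbatim). A compact sketch of that parsing step, with the log path and conn/op numbers taken from the output above:

import re

ACCESS_LOG = "/var/log/dirsrv/slapd-standalone_1/access"

def nentries_for(conn_id, op_id):
    """Return nentries from the RESULT line of the given conn/op, or None."""
    pattern = re.compile(r"conn=%d op=%d RESULT .*nentries=(\d+)" % (conn_id, op_id))
    with open(ACCESS_LOG) as f:
        for line in f:
            m = pattern.search(line)
            if m:
                return int(m.group(1))
    return None

# e.g. the "uid=a*" search in test case 0 is conn=1 op=2 and must return one entry
assert nentries_for(1, 2) == 1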
Passed tickets/ticket48170_test.py::test_ticket48170 0.00
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48170_test:Index update correctly rejected
INFO:tests.tickets.ticket48170_test:Test complete
Passed tickets/ticket48194_test.py::test_init 2.44
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stdout call -----------------------------
/etc/dirsrv/slapd-standalone_1/pwdfile.txt
0d7707a73095de53f1f34992df68362b81cebc55
Is this a CA certificate [y/N]?
Enter the path length constraint, enter to skip [<0 for unlimited path]: >
Is this a critical extension [y/N]?
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:####### Testing Ticket 48194 - harden the list of ciphers available by default
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Checking existing certs ######################
certutil: Could not find cert: CA certificate : PR_FILE_NOT_FOUND_ERROR: File not found
certutil: Could not find cert: Server-Cert : PR_FILE_NOT_FOUND_ERROR: File not found
INFO:lib389.utils: ######################### Create a password file ######################
INFO:lib389.utils: ######################### Create a noise file ######################
INFO:lib389.utils: ######################### Create key3.db and cert8.db database ######################
INFO:lib389.utils: ######################### Creating encryption key for CA ######################
Generating key. This may take a few moments...
INFO:lib389.utils: ######################### Creating self-signed CA certificate ######################
Generating key. This may take a few moments...
INFO:lib389.utils: ######################### Exporting the CA certificate to cacert.asc ######################
INFO:lib389.utils: ######################### Generate the server certificate ######################
Generating key. This may take a few moments...
Notice: Trust flag u is set automatically if the private key is present.
INFO:lib389.utils: ######################### create the pin file ######################
INFO:lib389.utils: ######################### enable SSL in the directory server with all ciphers ######################
Passed tickets/ticket48194_test.py::test_run_0 1.92
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ###############################################
INFO:lib389:####### Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on
INFO:lib389:###############################################
INFO:lib389.utils: ######################### Restarting the server ######################
INFO:lib389.utils:Testing RC4-SHA -- expect to handshake successfully
INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA
INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is RC4-SHA
INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake successfully
INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256
INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES256-SHA256
Passed tickets/ticket48194_test.py::test_run_1 3.30
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES256-SHA256
Passed tickets/ticket48194_test.py::test_run_2 3.35
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES128-SHA -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES128-SHA INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES128-SHA INFO:lib389.utils:Testing AES256-SHA -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES256-SHA
Passed tickets/ticket48194_test.py::test_run_3 2.21
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 4 - Check the ciphers availability for "-all" INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE)
Passed tickets/ticket48194_test.py::test_run_4 3.29
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES256-SHA256
Passed tickets/ticket48194_test.py::test_run_5 3.29
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES256-SHA256
Passed tickets/ticket48194_test.py::test_run_6 3.33
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES128-SHA -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES128-SHA INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES128-SHA
Passed tickets/ticket48194_test.py::test_run_7 3.30
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing RC4-MD5 -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-MD5 INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is RC4-MD5
Passed tickets/ticket48194_test.py::test_run_8 3.28
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off) INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES256-SHA256
Passed tickets/ticket48194_test.py::test_run_9 3.27
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is RC4-SHA INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is AES256-SHA256
Passed tickets/ticket48194_test.py::test_run_10 2.27
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing RC4-MD5 -- expect to handshake successfully INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-MD5 INFO:lib389.utils:Found: New, TLSv1/SSLv3, Cipher is RC4-MD5 INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE)
Passed tickets/ticket48194_test.py::test_run_11 2.23
----------------------------- Captured stderr call -----------------------------
INFO:lib389: ############################################### INFO:lib389:####### Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported INFO:lib389:############################################### INFO:lib389.utils: ######################### Restarting the server ###################### INFO:lib389.utils:Testing RC4-SHA -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher RC4-SHA INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE) INFO:lib389.utils:Testing AES256-SHA256 -- expect to handshake failed INFO:lib389.utils:Running cmdline: /usr/bin/openssl s_client -connect localhost:636 -cipher AES256-SHA256 INFO:lib389.utils:Found: New, (NONE), Cipher is (NONE)
Passed tickets/ticket48212_test.py::test_ticket48212 93.68
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48212_test:Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well. INFO:lib389:Bind as cn=Directory Manager INFO:lib389: ######################### Import Test data (/var/lib/dirsrv/slapd-standalone_1/ldif/example1k_posix.ldif) ###################### INFO:lib389:Import task import_03152017_043519 for file /var/lib/dirsrv/slapd-standalone_1/ldif/example1k_posix.ldif completed successfully INFO:lib389: +++++ dbverify +++++ INFO:lib389:Running /usr/sbin/dbverify -Z standalone_1 -V [15/Mar/2017:04:35:22.431623030 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryrdn.db: ok [15/Mar/2017:04:35:22.433819157 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db: ok [15/Mar/2017:04:35:22.434783517 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/ancestorid.db: ok [15/Mar/2017:04:35:22.435750047 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uid.db: ok [15/Mar/2017:04:35:22.436741980 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryusn.db: ok [15/Mar/2017:04:35:22.437588408 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/parentid.db: ok [15/Mar/2017:04:35:22.439927197 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/givenName.db: ok [15/Mar/2017:04:35:22.440885044 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/nsuniqueid.db: ok [15/Mar/2017:04:35:22.441663474 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/aci.db: ok [15/Mar/2017:04:35:22.444757153 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db: ok [15/Mar/2017:04:35:22.446538066 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/mail.db: ok [15/Mar/2017:04:35:22.448461932 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/sn.db: ok [15/Mar/2017:04:35:22.449856589 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/objectclass.db: ok [15/Mar/2017:04:35:22.450575726 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/numsubordinates.db: ok INFO:lib389:dbverify passed INFO:lib389: ######################### Add index by uidnumber ###################### INFO:lib389: ######################### reindexing... 
###################### INFO:lib389: +++++ reindex uidnumber +++++ INFO:lib389:Running /usr/sbin/db2index.pl -Z standalone_1 -D "cn=Directory Manager" -w "password" -n userRoot -t uidnumber INFO:lib389: +++++ dbverify +++++ INFO:lib389:Running /usr/sbin/dbverify -Z standalone_1 -V [15/Mar/2017:04:35:52.613262370 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryrdn.db: ok [15/Mar/2017:04:35:52.615291788 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db: ok [15/Mar/2017:04:35:52.616150635 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/ancestorid.db: ok [15/Mar/2017:04:35:52.616922165 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uid.db: ok [15/Mar/2017:04:35:52.617489848 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryusn.db: ok [15/Mar/2017:04:35:52.618057622 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/parentid.db: ok [15/Mar/2017:04:35:52.620389217 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/givenName.db: ok [15/Mar/2017:04:35:52.621238201 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/nsuniqueid.db: ok [15/Mar/2017:04:35:52.621803074 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/aci.db: ok [15/Mar/2017:04:35:52.622583525 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uidnumber.db: ok [15/Mar/2017:04:35:52.625225184 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db: ok [15/Mar/2017:04:35:52.626808029 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/mail.db: ok [15/Mar/2017:04:35:52.628809235 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/sn.db: ok [15/Mar/2017:04:35:52.629954008 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/objectclass.db: ok [15/Mar/2017:04:35:52.630496562 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/numsubordinates.db: ok INFO:lib389:dbverify passed INFO:lib389: ######################### Add nsMatchingRule ###################### INFO:lib389: ######################### reindexing... 
###################### INFO:lib389: +++++ reindex uidnumber +++++ INFO:lib389:Running /usr/sbin/db2index.pl -Z standalone_1 -D "cn=Directory Manager" -w "password" -n userRoot -t uidnumber INFO:lib389: +++++ dbverify +++++ INFO:lib389:Running /usr/sbin/dbverify -Z standalone_1 -V [15/Mar/2017:04:36:22.769521009 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryrdn.db: ok [15/Mar/2017:04:36:22.771747967 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db: ok [15/Mar/2017:04:36:22.772573038 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/ancestorid.db: ok [15/Mar/2017:04:36:22.773248705 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uid.db: ok [15/Mar/2017:04:36:22.773681202 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryusn.db: ok [15/Mar/2017:04:36:22.774229085 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/parentid.db: ok [15/Mar/2017:04:36:22.776426809 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/givenName.db: ok [15/Mar/2017:04:36:22.777131866 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/nsuniqueid.db: ok [15/Mar/2017:04:36:22.777569320 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/aci.db: ok [15/Mar/2017:04:36:22.778359543 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uidnumber.db: ok [15/Mar/2017:04:36:22.781008899 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db: ok [15/Mar/2017:04:36:22.782463705 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/mail.db: ok [15/Mar/2017:04:36:22.784102338 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/sn.db: ok [15/Mar/2017:04:36:22.785182277 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/objectclass.db: ok [15/Mar/2017:04:36:22.785653737 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/numsubordinates.db: ok INFO:lib389:dbverify passed INFO:lib389: ######################### Delete nsMatchingRule ###################### INFO:lib389: +++++ reindex uidnumber +++++ INFO:lib389:Running /usr/sbin/db2index.pl -Z standalone_1 -D "cn=Directory Manager" -w "password" -n userRoot -t uidnumber INFO:lib389: +++++ dbverify +++++ INFO:lib389:Running /usr/sbin/dbverify -Z standalone_1 -V [15/Mar/2017:04:36:52.932405811 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryrdn.db: ok [15/Mar/2017:04:36:52.934843716 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db: ok [15/Mar/2017:04:36:52.935773419 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/ancestorid.db: ok [15/Mar/2017:04:36:52.936527675 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uid.db: ok [15/Mar/2017:04:36:52.937068066 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/entryusn.db: ok [15/Mar/2017:04:36:52.937758803 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/parentid.db: ok [15/Mar/2017:04:36:52.939909525 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/givenName.db: ok [15/Mar/2017:04:36:52.940741153 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/nsuniqueid.db: ok 
[15/Mar/2017:04:36:52.941315168 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/aci.db: ok [15/Mar/2017:04:36:52.942278719 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/uidnumber.db: ok [15/Mar/2017:04:36:52.944804516 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db: ok [15/Mar/2017:04:36:52.946303711 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/mail.db: ok [15/Mar/2017:04:36:52.947941829 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/sn.db: ok [15/Mar/2017:04:36:52.949041752 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/objectclass.db: ok [15/Mar/2017:04:36:52.949821555 -0400] - INFO - dbverify_ext - /var/lib/dirsrv/slapd-standalone_1/db/userRoot/numsubordinates.db: ok INFO:lib389:dbverify passed INFO:tests.tickets.ticket48212_test:Testcase PASSED
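The reindex/verify cycle above can be reproduced with the same command lines that appear in the log; a minimal sketch follows, reusing the instance name, backend and credentials shown there (anything else is a placeholder).

import subprocess

INSTANCE = "standalone_1"

def reindex(attr, binddn="cn=Directory Manager", password="password"):
    """Rebuild one attribute index, as the test does with db2index.pl."""
    subprocess.check_call(["/usr/sbin/db2index.pl", "-Z", INSTANCE,
                           "-D", binddn, "-w", password,
                           "-n", "userRoot", "-t", attr])

def dbverify():
    """Run dbverify -V and return (passed, full report)."""
    proc = subprocess.Popen(["/usr/sbin/dbverify", "-Z", INSTANCE, "-V"],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    return proc.returncode == 0, out

reindex("uidnumber")   # db2index.pl runs as an online task, so a real
                       # script would wait for it to finish before verifying
passed, report = dbverify()
print("dbverify passed" if passed else report)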
Passed tickets/ticket48214_test.py::test_ticket48214_run 0.06
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48214_test:Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value INFO:lib389:Bind as cn=Directory Manager INFO:lib389: ######################### Out of Box ###################### INFO:lib389: +++++ Check Max Ber Size +++++ INFO:lib389: +++++ Get maxbersize from dse.ldif +++++ INFO:lib389: Run CMD: egrep nsslapd-maxbersize /etc/dirsrv/slapd-standalone_1/dse.ldif INFO:lib389: Empty: INFO:lib389: No nsslapd-maxbersize found in dse.ldif INFO:lib389: ldapsearch returned nsslapd-maxbersize: 2097152 INFO:lib389: Checking 2097152 vs 2097152 INFO:lib389: ######################### Add nsslapd-maxbersize: 0 ###################### INFO:lib389: +++++ Check Max Ber Size +++++ INFO:lib389: +++++ Get maxbersize from dse.ldif +++++ INFO:lib389: Run CMD: egrep nsslapd-maxbersize /etc/dirsrv/slapd-standalone_1/dse.ldif INFO:lib389: Right format - nsslapd-maxbersize: 0 INFO:lib389: nsslapd-maxbersize: 0 INFO:lib389: ldapsearch returned nsslapd-maxbersize: 2097152 INFO:lib389: Checking 2097152 vs 2097152 INFO:lib389: ######################### Add nsslapd-maxbersize: 10000 ###################### INFO:lib389: +++++ Check Max Ber Size +++++ INFO:lib389: +++++ Get maxbersize from dse.ldif +++++ INFO:lib389: Run CMD: egrep nsslapd-maxbersize /etc/dirsrv/slapd-standalone_1/dse.ldif INFO:lib389: Right format - nsslapd-maxbersize: 10000 INFO:lib389: nsslapd-maxbersize: 10000 INFO:lib389: ldapsearch returned nsslapd-maxbersize: 10000 INFO:lib389:ticket48214 was successfully verified.
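The check above compares the nsslapd-maxbersize value stored in dse.ldif with the value an ldapsearch against cn=config returns; a rough sketch with python-ldap follows, where the URI and credentials are placeholders.

import re
import ldap

DSE_LDIF = "/etc/dirsrv/slapd-standalone_1/dse.ldif"

def maxbersize_from_dse(path=DSE_LDIF):
    """Return the raw value from dse.ldif, or None if the attribute is absent."""
    with open(path) as f:
        for line in f:
            m = re.match(r"nsslapd-maxbersize:\s*(\d+)", line)
            if m:
                return m.group(1)
    return None

def maxbersize_from_ldap(uri="ldap://localhost:389",
                         binddn="cn=Directory Manager", password="password"):
    conn = ldap.initialize(uri)
    conn.simple_bind_s(binddn, password)
    result = conn.search_s("cn=config", ldap.SCOPE_BASE,
                           attrlist=["nsslapd-maxbersize"])
    return result[0][1]["nsslapd-maxbersize"][0]

# Per the ticket, an absent attribute or an explicit 0 should both make
# the search return the default (2097152) rather than 0.
print(maxbersize_from_dse(), maxbersize_from_ldap())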
Passed tickets/ticket48233_test.py::test_ticket48233 4.20
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48233_test:Test complete
Passed tickets/ticket48252_test.py::test_ticket48252_setup 0.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48252_test:Enable the USN plugin... INFO:tests.tickets.ticket48252_test:Adding test entries...
Passed tickets/ticket48252_test.py::test_ticket48252_run_0 6.67
----------------------------- Captured stdout call -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists Usage: db2index [-Z serverID] [-n backend | {-s includesuffix}* -t attribute[:indextypes[:matchingrules]] -T vlvTag] [-h] Options: -Z serverID - Server instance identifier -n backend - Backend database name. Example: userRoot -s includeSuffix - The suffix to index -t attribute[:indextypes[:matchingrules]] - attributeName: name of the attribute to be indexed If omitted, all the indexes defined for that instance are generated. - indextypes: comma separated index types - matchingrules: comma separated matrules Example: -t foo:eq,pres -T vlvTag - VLV index name -h - Display usage OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48252_test:Case 1 - Check deleted entry is not in the 'cn' index file INFO:tests.tickets.ticket48252_test: Deleting a test entry cn=test_user0,dc=example,dc=com... INFO:tests.tickets.ticket48252_test: dbscan - checking test_user0 is in index file cn... INFO:lib389:Running script: /usr/bin/dbscan -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db INFO:lib389:Output from /usr/bin/dbscan -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db INFO:lib389:('*%20ad \n*%20ma \n*^ac \n*^di \n*^hr \n*^pd \n*^qa \n*^te \n*_us \n*a%20m \n*acc \n*adm \n*age \n*ana \n*ato \n*cco \n*cou \n*cto \n*d%20m \n*dir \n*dmi \n*ect \n*er1 \n*er2 \n*er3 \n*er4 \n*er5 \n*er6 \n*er7 \n*er8 \n*er9 \n*ers \n*est \n*g%20m \n*ger \n*hr%20 \n*ing \n*ini \n*ire \n*ist \n*man \n*min \n*nag \n*ng%20 \n*nis \n*nti \n*ors \n*ory \n*oun \n*pd%20 \n*qa%20 \n*r%20m \n*r1$ \n*r2$ \n*r3$ \n*r4$ \n*r5$ \n*r6$ \n*r7$ \n*r8$ \n*r9$ \n*rat \n*rec \n*rs$ \n*ry%20 \n*ser \n*st_ \n*str \n*t_u \n*tes \n*tin \n*tor \n*tra \n*unt \n*use \n*y%20a \n+ \n=accounting%20managers \n=directory%20administrators \n=hr%20managers \n=pd%20managers \n=qa%20managers \n=test_user1 \n=test_user2 \n=test_user3 \n=test_user4 \n=test_user5 \n=test_user6 \n=test_user7 \n=test_user8 \n=test_user9 \n', None) INFO:lib389:Did not found key test_user0 in dbscan output INFO:tests.tickets.ticket48252_test: db2index - reindexing cn ... INFO:lib389:Running script: /usr/sbin/db2index -Z standalone_1 -n userRoot -s c -s n INFO:tests.tickets.ticket48252_test: dbscan - checking test_user0 is in index file cn... INFO:lib389:Running script: /usr/bin/dbscan -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db INFO:lib389:Output from /usr/bin/dbscan -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db INFO:lib389:('*%20ad \n*%20ma \n*^ac \n*^di \n*^hr \n*^pd \n*^qa \n*^te \n*_us \n*a%20m \n*acc \n*adm \n*age \n*ana \n*ato \n*cco \n*cou \n*cto \n*d%20m \n*dir \n*dmi \n*ect \n*er1 \n*er2 \n*er3 \n*er4 \n*er5 \n*er6 \n*er7 \n*er8 \n*er9 \n*ers \n*est \n*g%20m \n*ger \n*hr%20 \n*ing \n*ini \n*ire \n*ist \n*man \n*min \n*nag \n*ng%20 \n*nis \n*nti \n*ors \n*ory \n*oun \n*pd%20 \n*qa%20 \n*r%20m \n*r1$ \n*r2$ \n*r3$ \n*r4$ \n*r5$ \n*r6$ \n*r7$ \n*r8$ \n*r9$ \n*rat \n*rec \n*rs$ \n*ry%20 \n*ser \n*st_ \n*str \n*t_u \n*tes \n*tin \n*tor \n*tra \n*unt \n*use \n*y%20a \n+ \n=accounting%20managers \n=directory%20administrators \n=hr%20managers \n=pd%20managers \n=qa%20managers \n=test_user1 \n=test_user2 \n=test_user3 \n=test_user4 \n=test_user5 \n=test_user6 \n=test_user7 \n=test_user8 \n=test_user9 \n', None) INFO:lib389:Did not found key test_user0 in dbscan output INFO:tests.tickets.ticket48252_test: entry cn=test_user0,dc=example,dc=com is not in the cn index file after reindexed. INFO:tests.tickets.ticket48252_test:Case 1 - PASSED
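The dbscan step in case 1 reduces to dumping the index file and checking for the deleted entry's key; a small sketch using the same file path as the log, with the key passed in as a placeholder.

import subprocess

CN_INDEX = "/var/lib/dirsrv/slapd-standalone_1/db/userRoot/cn.db"

def key_in_index(key, index_file=CN_INDEX):
    """Return True if dbscan's dump of the index file contains `key`."""
    out = subprocess.check_output(["/usr/bin/dbscan", "-f", index_file])
    return key in out.decode("utf-8", "replace")

# After deleting cn=test_user0,... and reindexing, the key must be gone.
assert not key_in_index("test_user0")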
Passed tickets/ticket48252_test.py::test_ticket48252_run_1 2.24
----------------------------- Captured stdout call -----------------------------
OK group dirsrv exists OK user dirsrv exists Usage: db2index [-Z serverID] [-n backend | {-s includesuffix}* -t attribute[:indextypes[:matchingrules]] -T vlvTag] [-h] Options: -Z serverID - Server instance identifier -n backend - Backend database name. Example: userRoot -s includeSuffix - The suffix to index -t attribute[:indextypes[:matchingrules]] - attributeName: name of the attribute to be indexed If omitted, all the indexes defined for that instance are generated. - indextypes: comma separated index types - matchingrules: comma separated matrules Example: -t foo:eq,pres -T vlvTag - VLV index name -h - Display usage ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48252_test:Case 2 - Check deleted entry is in the 'objectclass' index file as a tombstone entry INFO:tests.tickets.ticket48252_test: Deleting a test entry cn=test_user1,dc=example,dc=com... INFO:tests.tickets.ticket48252_test: entry cn=test_user1,dc=example,dc=com is in the objectclass index file. INFO:tests.tickets.ticket48252_test: db2index - reindexing objectclass ... INFO:lib389:Running script: /usr/sbin/db2index -Z standalone_1 -n userRoot -s o -s b -s j -s e -s c -s t -s c -s l -s a -s s -s s INFO:tests.tickets.ticket48252_test: entry cn=test_user1,dc=example,dc=com is in the objectclass index file after reindexed. INFO:tests.tickets.ticket48252_test:Case 2 - PASSED
Passed tickets/ticket48265_test.py::test_ticket48265_test 0.05
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48265_test:Adding 20 test entries... INFO:tests.tickets.ticket48265_test:Search with Ticket 47521 type complex filter INFO:tests.tickets.ticket48265_test:Search with Ticket 48265 type complex filter INFO:tests.tickets.ticket48265_test:Test 48265 complete
Passed tickets/ticket48270_test.py::test_ticket48270_init 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48270_test:Initialization: add dummy entries for the tests
Passed tickets/ticket48270_test.py::test_ticket48270_homeDirectory_indexed_cis 2.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48270_test: index homeDirectory in caseIgnoreIA5Match and caseExactIA5Match INFO:tests.tickets.ticket48270_test:successfully checked that filter with exact mr , a filter with lowercase eq is failing INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Index task index_homeDirectory_03152017_043750 completed successfully INFO:tests.tickets.ticket48270_test:Check indexing succeeded with a specified matching rule
Passed tickets/ticket48270_test.py::test_ticket48270_homeDirectory_mixed_value 0.00
No log output captured.
Passed tickets/ticket48270_test.py::test_ticket48270_extensible_search 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48270_test:Default: can retrieve an entry filter syntax with exact stored value INFO:tests.tickets.ticket48270_test:Default: can retrieve an entry filter caseExactIA5Match with exact stored value INFO:tests.tickets.ticket48270_test:Default: can not retrieve an entry filter syntax match with lowered stored value INFO:tests.tickets.ticket48270_test:Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value INFO:tests.tickets.ticket48270_test:Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value
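The five assertions above exercise plain equality, caseExactIA5Match and caseIgnoreIA5Match extensible filters against the same stored homeDirectory value; the sketch below shows those filter shapes with python-ldap, where the connection details and the sample value are placeholders.

import ldap

SUFFIX = "dc=example,dc=com"

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

def search(flt):
    return conn.search_s(SUFFIX, ldap.SCOPE_SUBTREE, flt, ["homeDirectory"])

stored = "/home/Example"   # placeholder for the mixed-case stored value
assert search("(homeDirectory={0})".format(stored))
assert search("(homeDirectory:caseExactIA5Match:={0})".format(stored))
assert not search("(homeDirectory={0})".format(stored.lower()))
assert not search("(homeDirectory:caseExactIA5Match:={0})".format(stored.lower()))
assert search("(homeDirectory:caseIgnoreIA5Match:={0})".format(stored.lower()))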
Passed tickets/ticket48294_test.py::test_48294_init 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:lib389:############################################### INFO:lib389:####### Testing Ticket 48294 - Linked Attributes plug-in - won't update links after MODRDN operation INFO:lib389:############################################### INFO:tests.tickets.ticket48294_test:Enable Dynamic plugins, and the linked Attrs plugin INFO:tests.tickets.ticket48294_test:Add the plugin config entry INFO:tests.tickets.ticket48294_test:Add 2 entries: manager1 and employee1 INFO:tests.tickets.ticket48294_test:Add linktype to manager1 INFO:tests.tickets.ticket48294_test:Check managed attribute INFO:tests.tickets.ticket48294_test:Value of manager is uid=manager1,ou=People,dc=example,dc=com INFO:tests.tickets.ticket48294_test:PASSED
Passed tickets/ticket48294_test.py::test_48294_run_0 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389:############################################### INFO:lib389:####### Case 0 - Rename employee1 and adjust the link type value by replace INFO:lib389:############################################### INFO:tests.tickets.ticket48294_test:Rename employee1 to employee2 INFO:lib389: ######################### MODRDN uid=employee2 ###################### INFO:tests.tickets.ticket48294_test:Modify the value of directReport to uid=employee2 INFO:tests.tickets.ticket48294_test:Check managed attribute INFO:tests.tickets.ticket48294_test:Value of manager is uid=manager1,ou=People,dc=example,dc=com INFO:tests.tickets.ticket48294_test:PASSED
Passed tickets/ticket48294_test.py::test_48294_run_1 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389:############################################### INFO:lib389:####### Case 1 - Rename employee2 and adjust the link type value by delete and add INFO:lib389:############################################### INFO:tests.tickets.ticket48294_test:Rename employee2 to employee3 INFO:lib389: ######################### MODRDN uid=employee3 ###################### INFO:tests.tickets.ticket48294_test:Modify the value of directReport to uid=employee3 INFO:tests.tickets.ticket48294_test:Check managed attribute INFO:tests.tickets.ticket48294_test:Value of manager is uid=manager1,ou=People,dc=example,dc=com INFO:tests.tickets.ticket48294_test:PASSED
Passed tickets/ticket48294_test.py::test_48294_run_2 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389:############################################### INFO:lib389:####### Case 2 - Rename manager1 to manager2 and make sure the managed attribute value is updated INFO:lib389:############################################### INFO:tests.tickets.ticket48294_test:Rename manager1 to manager2 INFO:lib389: ######################### MODRDN uid=manager2 ###################### INFO:tests.tickets.ticket48294_test:Check managed attribute INFO:tests.tickets.ticket48294_test:Value of manager is uid=manager2,ou=People,dc=example,dc=com INFO:tests.tickets.ticket48294_test:PASSED
Passed tickets/ticket48295_test.py::test_48295_init 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:lib389:############################################### INFO:lib389:####### Testing Ticket 48295 - Entry cache is not rolled back -- Linked Attributes plug-in - wrong behaviour when adding valid and broken links INFO:lib389:############################################### INFO:tests.tickets.ticket48295_test:Enable Dynamic plugins, and the linked Attrs plugin INFO:tests.tickets.ticket48295_test:Add the plugin config entry INFO:tests.tickets.ticket48295_test:Add 2 entries: manager1 and employee1 INFO:tests.tickets.ticket48295_test:PASSED
Passed tickets/ticket48295_test.py::test_48295_run 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389:############################################### INFO:lib389:####### Add 2 linktypes to manager1 - one exists, another does not to make sure the managed entry does not have managed type. INFO:lib389:############################################### INFO:tests.tickets.ticket48295_test:Add uid=employee1 and uid=doNotExist expectedly failed. INFO:tests.tickets.ticket48295_test:Check managed attribute does not exist. INFO:tests.tickets.ticket48295_test:Value of manager does not expectedly exist INFO:tests.tickets.ticket48295_test:PASSED
Passed tickets/ticket48312_test.py::test_ticket48312 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48312_test:Test complete
Passed tickets/ticket48354_test.py::test_ticket48354 0.00
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48354_test:Test PASSED
Passed tickets/ticket48366_test.py::test_ticket48366_init 0.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:lib389:Add subtree: ou=green,dc=example,dc=com INFO:lib389:Add subtree: ou=red,dc=example,dc=com INFO:lib389:Add cn=test,ou=people,dc=example,dc=com INFO:lib389:Add cn=proxy,ou=people,dc=example,dc=com INFO:lib389.utils:Adding %d test entries...
Passed tickets/ticket48370_test.py::test_ticket48370 0.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48370_test:Test PASSED
Passed tickets/ticket48383_test.py::test_ticket48383 43.34
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stdout call -----------------------------
OK group dirsrv exists OK user dirsrv exists OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
CRITICAL:tests.tickets.ticket48383_test:Failed to change nsslapd-cachememsize No such object INFO:lib389:Running script: /usr/sbin/ns-slapd db2ldif -D /etc/dirsrv/slapd-standalone_1 -n userRoot -s dc=example,dc=com -r -a /var/lib/dirsrv/slapd-standalone_1/ldif/standalone_1.ldif [15/Mar/2017:04:39:20.712362247 -0400] - NOTICE - ldbm_back_start - found 1883664k physical memory [15/Mar/2017:04:39:20.713220837 -0400] - NOTICE - ldbm_back_start - found 188364k avaliable [15/Mar/2017:04:39:20.713757038 -0400] - NOTICE - ldbm_back_start - total cache size: 212716748 B; ldiffile: /var/lib/dirsrv/slapd-standalone_1/ldif/standalone_1.ldif [15/Mar/2017:04:39:20.840508736 -0400] - INFO - ldbm_back_ldbm2ldif - export userRoot: Processed 10 entries (100%). [15/Mar/2017:04:39:20.841398224 -0400] - INFO - dblayer_pre_close - Waiting for 4 database threads to stop [15/Mar/2017:04:39:21.729952188 -0400] - INFO - dblayer_pre_close - All database threads now stopped INFO:lib389:Running script: /usr/sbin/ns-slapd ldif2db -D /etc/dirsrv/slapd-standalone_1 -n userRoot -i /var/lib/dirsrv/slapd-standalone_1/ldif/standalone_1.ldif [15/Mar/2017:04:39:24.069611720 -0400] - INFO - dblayer_instance_start - Import is running with nsslapd-db-private-import-mem on; No other process is allowed to access the database [15/Mar/2017:04:39:24.070650293 -0400] - INFO - check_and_set_import_cache - pagesize: 4096, pages: 470916, procpages: 3010 [15/Mar/2017:04:39:24.071227276 -0400] - INFO - check_and_set_import_cache - Import allocates 495104KB import cache. [15/Mar/2017:04:39:24.088016690 -0400] - INFO - import_main_offline - import userRoot: Beginning import job... [15/Mar/2017:04:39:24.088683205 -0400] - INFO - import_main_offline - import userRoot: Index buffering enabled with bucket size 100 [15/Mar/2017:04:39:24.289663347 -0400] - INFO - import_producer - import userRoot: Processing file "/var/lib/dirsrv/slapd-standalone_1/ldif/standalone_1.ldif" [15/Mar/2017:04:39:24.367846093 -0400] - INFO - import_producer - import userRoot: Finished scanning file "/var/lib/dirsrv/slapd-standalone_1/ldif/standalone_1.ldif" (10 entries) [15/Mar/2017:04:39:37.688141902 -0400] - INFO - import_monitor_threads - import userRoot: Workers finished; cleaning up... [15/Mar/2017:04:39:37.890032288 -0400] - INFO - import_monitor_threads - import userRoot: Workers cleaned up. [15/Mar/2017:04:39:37.890740350 -0400] - INFO - import_main_offline - import userRoot: Cleaning up producer thread... [15/Mar/2017:04:39:37.891265323 -0400] - INFO - import_main_offline - import userRoot: Indexing complete. Post-processing... [15/Mar/2017:04:39:37.891738680 -0400] - INFO - import_main_offline - import userRoot: Generating numsubordinates (this may take several minutes to complete)... [15/Mar/2017:04:39:37.893529491 -0400] - INFO - import_main_offline - import userRoot: Generating numSubordinates complete. [15/Mar/2017:04:39:37.894139352 -0400] - INFO - ldbm_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs... [15/Mar/2017:04:39:37.894661339 -0400] - INFO - ldbm_get_nonleaf_ids - import userRoot: Finished gathering ancestorid non-leaf IDs. [15/Mar/2017:04:39:37.895724695 -0400] - INFO - ldbm_ancestorid_new_idl_create_index - import userRoot: Creating ancestorid index (new idl)... [15/Mar/2017:04:39:37.896277480 -0400] - INFO - ldbm_ancestorid_new_idl_create_index - import userRoot: Created ancestorid index (new idl). [15/Mar/2017:04:39:37.896917381 -0400] - INFO - import_main_offline - import userRoot: Flushing caches... 
[15/Mar/2017:04:39:37.897325403 -0400] - INFO - import_main_offline - import userRoot: Closing files... [15/Mar/2017:04:39:38.288614215 -0400] - INFO - dblayer_pre_close - All database threads now stopped [15/Mar/2017:04:39:38.289710220 -0400] - INFO - import_main_offline - import userRoot: Import complete. Processed 10 entries in 14 seconds. (0.71 entries/sec) INFO:tests.tickets.ticket48383_test:Test complete
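The export/import pair in the log above uses the offline ns-slapd subcommands; a minimal sketch that runs the same command lines (with the server stopped) follows, with the paths copied from the log.

import subprocess

CONFIG_DIR = "/etc/dirsrv/slapd-standalone_1"
LDIF = "/var/lib/dirsrv/slapd-standalone_1/ldif/standalone_1.ldif"

# Export userRoot to LDIF (-r keeps replication metadata, as in the log).
subprocess.check_call(["/usr/sbin/ns-slapd", "db2ldif", "-D", CONFIG_DIR,
                       "-n", "userRoot", "-s", "dc=example,dc=com",
                       "-r", "-a", LDIF])

# Re-import the same LDIF back into userRoot.
subprocess.check_call(["/usr/sbin/ns-slapd", "ldif2db", "-D", CONFIG_DIR,
                       "-n", "userRoot", "-i", LDIF])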
Passed tickets/ticket48497_test.py::test_ticket48497_init 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48497_test:Initialization: add dummy entries for the tests
Passed tickets/ticket48497_test.py::test_ticket48497_homeDirectory_mixed_value 0.00
No log output captured.
Passed tickets/ticket48497_test.py::test_ticket48497_extensible_search 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48497_test:Default: can retrieve an entry filter syntax with exact stored value INFO:tests.tickets.ticket48497_test:Default: can retrieve an entry filter caseExactIA5Match with exact stored value INFO:tests.tickets.ticket48497_test:Default: can not retrieve an entry filter syntax match with lowered stored value INFO:tests.tickets.ticket48497_test:Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value INFO:tests.tickets.ticket48497_test:Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value
Passed tickets/ticket48497_test.py::test_ticket48497_homeDirectory_index_cfg 0.01
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48497_test: index homeDirectory in caseIgnoreIA5Match and caseExactIA5Match
Passed tickets/ticket48497_test.py::test_ticket48497_homeDirectory_index_run 2.01
----------------------------- Captured stderr call -----------------------------
INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Index task index_homeDirectory_03152017_043942 completed successfully INFO:tests.tickets.ticket48497_test:Check indexing succeeded with a specified matching rule
Passed tickets/ticket48637_test.py::test_ticket48637 2.21
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48637_test:Test PASSED
Passed tickets/ticket48665_test.py::test_ticket48665 0.01
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48665_test:5 entries are returned from the server. CRITICAL:tests.tickets.ticket48665_test:Failed to change nsslapd-cachememsize No such object INFO:tests.tickets.ticket48665_test:5 entries are returned from the server. INFO:tests.tickets.ticket48665_test:5 entries are returned from the server. INFO:tests.tickets.ticket48665_test:Test complete
Passed tickets/ticket48745_test.py::test_ticket48745_init 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48745_test:Initialization: add dummy entries for the tests
Passed tickets/ticket48745_test.py::test_ticket48745_homeDirectory_indexed_cis 2.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48745_test: index homeDirectory in caseIgnoreIA5Match and caseExactIA5Match INFO:tests.tickets.ticket48745_test:successfully checked that filter with exact mr , a filter with lowercase eq is failing INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Index task index_homeDirectory_03152017_043958 completed successfully INFO:tests.tickets.ticket48745_test:Check indexing succeeded with a specified matching rule
Passed tickets/ticket48745_test.py::test_ticket48745_homeDirectory_mixed_value 0.00
No log output captured.
Passed tickets/ticket48745_test.py::test_ticket48745_extensible_search_after_index 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48745_test:Default: can retrieve an entry filter syntax with exact stored value INFO:tests.tickets.ticket48745_test:Default: can retrieve an entry filter caseExactIA5Match with exact stored value INFO:tests.tickets.ticket48745_test:Default: can not retrieve an entry filter syntax match with lowered stored value INFO:tests.tickets.ticket48745_test:Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value INFO:tests.tickets.ticket48745_test:Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value
Passed tickets/ticket48746_test.py::test_ticket48746_init 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48746_test:Initialization: add dummy entries for the tests
Passed tickets/ticket48746_test.py::test_ticket48746_homeDirectory_indexed_cis 2.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48746_test: index homeDirectory in caseIgnoreIA5Match and caseExactIA5Match INFO:tests.tickets.ticket48746_test:successfully checked that filter with exact mr , a filter with lowercase eq is failing INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Index task index_homeDirectory_03152017_044004 completed successfully INFO:tests.tickets.ticket48746_test:Check indexing succeeded with a specified matching rule
Passed tickets/ticket48746_test.py::test_ticket48746_homeDirectory_mixed_value 0.00
No log output captured.
Passed tickets/ticket48746_test.py::test_ticket48746_extensible_search_after_index 0.00
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48746_test:Default: can retrieve an entry filter caseExactIA5Match with exact stored value
Passed tickets/ticket48746_test.py::test_ticket48746_homeDirectory_indexed_ces 2.02
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48746_test: index homeDirectory in caseExactIA5Match, this would trigger the crash INFO:tests.tickets.ticket48746_test:successfully checked that filter with exact mr , a filter with lowercase eq is failing INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Index task index_homeDirectory_03152017_044006 completed successfully INFO:tests.tickets.ticket48746_test:Check indexing succeeded with a specified matching rule
Passed tickets/ticket48759_test.py::test_ticket48759 20.20
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48759_test:Testing Ticket 48759 - no plugin calls for tombstone purging INFO:tests.tickets.ticket48759_test:Setting up replication... INFO:lib389:List backend with suffix=dc=example,dc=com INFO:lib389:Found entry dn: cn=replrepl,cn=config cn: bind dn pseudo user cn: replrepl objectClass: top objectClass: person sn: bind dn pseudo user userPassword: {SSHA512}xoA39TKCJhC3Zvp1oRe46iEjupmel7bpqWI/9jgDAgXC1vaKPormYCqFKwMK7ZfzFEzVGc4LL/iod+Wo1Yi/lPNBN5SVo5Ul INFO:tests.tickets.ticket48759_test:Enable plugins... INFO:tests.tickets.ticket48759_test:create users and group... INFO:tests.tickets.ticket48759_test:Adding members to the group... INFO:lib389:!!!!!!! uid=member2,dc=example,dc=com: memberof->cn=group,dc=example,dc=com INFO:tests.tickets.ticket48759_test:delete group... INFO:tests.tickets.ticket48759_test:add group again INFO:tests.tickets.ticket48759_test:Adding members to the group... INFO:lib389:!!!!!!! uid=member2,dc=example,dc=com: memberof->cn=group,dc=example,dc=com INFO:tests.tickets.ticket48759_test:get number of changes for uid=member2,dc=example,dc=com before tombstone purging INFO:tests.tickets.ticket48759_test:Wait for tombstone purge interval to pass ... INFO:tests.tickets.ticket48759_test:add dummy entry INFO:lib389:!!!!!!! uid=member2,dc=example,dc=com: memberof->cn=group,dc=example,dc=com INFO:tests.tickets.ticket48759_test:Wait for tombstone purge interval to pass again... INFO:tests.tickets.ticket48759_test:get number of changes for uid=member2,dc=example,dc=com before tombstone purging
Passed tickets/ticket48798_test.py::test_ticket48798 15.26
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
Generating key. This may take a few moments... Generating key. This may take a few moments... depth=1 C = AU, ST = Queensland, L = lib389, O = testing, CN = ca.lib389.example.com verify error:num=19:self signed certificate in certificate chain DONE depth=1 C = AU, ST = Queensland, L = lib389, O = testing, CN = ca.lib389.example.com verify error:num=19:self signed certificate in certificate chain DONE INFO:tests.tickets.ticket48798_test:Test complete
Passed tickets/ticket48844_test.py::test_ticket48844_init 0.29
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:lib389:List backend with suffix=dc=bitwise,dc=com INFO:lib389:Creating a local backend INFO:lib389:List backend cn=TestBitw,cn=ldbm database,cn=plugins,cn=config INFO:lib389:Found entry dn: cn=TestBitw,cn=ldbm database,cn=plugins,cn=config cn: TestBitw nsslapd-cachememsize: 512000 nsslapd-cachesize: -1 nsslapd-directory: /var/lib/dirsrv/slapd-standalone_1/db/TestBitw nsslapd-dncachememsize: 16777216 nsslapd-readonly: off nsslapd-require-index: off nsslapd-suffix: dc=bitwise,dc=com objectClass: top objectClass: extensibleObject objectClass: nsBackendInstance INFO:lib389:Entry dn: cn="dc=bitwise,dc=com",cn=mapping tree,cn=config cn: dc=bitwise,dc=com nsslapd-backend: TestBitw nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree INFO:lib389:Found entry dn: cn=dc\3Dbitwise\2Cdc\3Dcom,cn=mapping tree,cn=config cn: dc=bitwise,dc=com nsslapd-backend: TestBitw nsslapd-state: backend objectClass: top objectClass: extensibleObject objectClass: nsMappingTree
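Setting up the dc=bitwise,dc=com backend as logged above amounts to adding an ldbm backend entry and a matching mapping-tree entry; a sketch of those two adds with python-ldap follows, with the bind credentials as placeholders and the DNs and attributes taken from the entries shown in the log.

import ldap
import ldap.modlist

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

backend = {
    "objectClass": [b"top", b"extensibleObject", b"nsBackendInstance"],
    "cn": [b"TestBitw"],
    "nsslapd-suffix": [b"dc=bitwise,dc=com"],
}
conn.add_s("cn=TestBitw,cn=ldbm database,cn=plugins,cn=config",
           ldap.modlist.addModlist(backend))

mapping = {
    "objectClass": [b"top", b"extensibleObject", b"nsMappingTree"],
    "cn": [b"dc=bitwise,dc=com"],
    "nsslapd-state": [b"backend"],
    "nsslapd-backend": [b"TestBitw"],
}
conn.add_s('cn="dc=bitwise,dc=com",cn=mapping tree,cn=config',
           ldap.modlist.addModlist(mapping))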
Passed tickets/ticket48844_test.py::test_ticket48844_bitwise_on 1.91
No log output captured.
Passed tickets/ticket48844_test.py::test_ticket48844_bitwise_off 2.22
No log output captured.
Passed tickets/ticket48891_test.py::test_ticket48891_setup 0.97
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48891_test:Testing Ticket 48891 - ns-slapd crashes during the shutdown after adding attribute with a matching rule INFO:lib389:Bind as cn=Directory Manager DEBUG:tests.tickets.ticket48891_test:Looking for a core file in: /var/log/dirsrv/slapd-standalone_1/ INFO:lib389: ######################### SETUP SUFFIX o=ticket48891.org ###################### INFO:lib389:List backend with suffix=dc=ticket48891.org INFO:lib389:Creating a local backend INFO:lib389:List backend cn=ticket48891,cn=ldbm database,cn=plugins,cn=config INFO:lib389:Found entry dn: cn=ticket48891,cn=ldbm database,cn=plugins,cn=config cn: ticket48891 nsslapd-cachememsize: 512000 nsslapd-cachesize: -1 nsslapd-directory: /var/lib/dirsrv/slapd-standalone_1/db/ticket48891 nsslapd-dncachememsize: 16777216 nsslapd-readonly: off nsslapd-require-index: off nsslapd-suffix: dc=ticket48891.org objectClass: top objectClass: extensibleObject objectClass: nsBackendInstance INFO:lib389:Entry dn: cn="dc=ticket48891.org",cn=mapping tree,cn=config cn: dc=ticket48891.org nsslapd-backend: ticket48891 nsslapd-state: backend objectclass: top objectclass: extensibleObject objectclass: nsMappingTree INFO:lib389:Found entry dn: cn=dc\3Dticket48891.org,cn=mapping tree,cn=config cn: dc=ticket48891.org nsslapd-backend: ticket48891 nsslapd-state: backend objectClass: top objectClass: extensibleObject objectClass: nsMappingTree INFO:lib389: ######################### Generate Test data ###################### INFO:lib389: ######################### SEARCH ALL ###################### INFO:lib389:Bind as cn=Directory Manager and add the READ/SEARCH SELFDN aci INFO:lib389:Returned 10 entries. INFO:lib389:10 person entries are successfully created under dc=ticket48891.org. INFO:tests.tickets.ticket48891_test:Testcase PASSED
Passed tickets/ticket48893_test.py::test_ticket48893 0.00
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48893_test:Test PASSED
Passed tickets/ticket48906_test.py::test_ticket48906_setup 0.02
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists OK user dirsrv exists ----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48906_test:Testing Ticket 48906 - ns-slapd crashes during the shutdown after adding attribute with a matching rule INFO:lib389:Bind as cn=Directory Manager INFO:lib389: ######################### SEARCH ALL ###################### INFO:lib389:Bind as cn=Directory Manager and add the READ/SEARCH SELFDN aci INFO:lib389:Returned 10 entries. INFO:lib389:10 person entries are successfully created under dc=example,dc=com.
Passed tickets/ticket48906_test.py::test_ticket48906_dblock_default 0.00
----------------------------- Captured stderr call -----------------------------
INFO:lib389:################################### INFO:lib389:### INFO:lib389:### Check that before any change config/monitor INFO:lib389:### contains the default value INFO:lib389:### INFO:lib389:###################################
Passed tickets/ticket48906_test.py::test_ticket48906_dblock_ldap_update 1.19
----------------------------- Captured stdout call -----------------------------
line locks:10000 expected_value 10000 value 10000 ----------------------------- Captured stderr call -----------------------------
INFO:lib389:################################### INFO:lib389:### INFO:lib389:### Check that after ldap update INFO:lib389:### - monitor contains DEFAULT INFO:lib389:### - configured contains DBLOCK_LDAP_UPDATE INFO:lib389:### - After stop dse.ldif contains DBLOCK_LDAP_UPDATE INFO:lib389:### - After stop guardian contains DEFAULT INFO:lib389:### In fact guardian should differ from config to recreate the env INFO:lib389:### Check that after restart (DBenv recreated) INFO:lib389:### - monitor contains DBLOCK_LDAP_UPDATE INFO:lib389:### - configured contains DBLOCK_LDAP_UPDATE INFO:lib389:### - dse.ldif contains DBLOCK_LDAP_UPDATE INFO:lib389:### INFO:lib389:###################################
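The dblock LDAP-update case above boils down to replacing nsslapd-db-locks on the ldbm config entry and reading the value back; the new lock count only takes effect once the DB environment is recreated at restart, which is why the monitor still shows the old value until then. A rough sketch, with connection details and the lock count as placeholders:

import ldap

LDBM_CONFIG = "cn=config,cn=ldbm database,cn=plugins,cn=config"

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

# Configure a new lock count (placeholder value).
conn.modify_s(LDBM_CONFIG, [(ldap.MOD_REPLACE, "nsslapd-db-locks", b"20000")])

# Read back the configured value; the monitor only reflects it after restart.
entry = conn.search_s(LDBM_CONFIG, ldap.SCOPE_BASE,
                      attrlist=["nsslapd-db-locks"])
print(entry[0][1]["nsslapd-db-locks"])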
Passed tickets/ticket48906_test.py::test_ticket48906_dblock_edit_update 2.24
----------------------------- Captured stdout call -----------------------------
line locks:20000 expected_value 20000 value 20000 line locks:40000 expected_value 40000 value 40000 ----------------------------- Captured stderr call -----------------------------
INFO:lib389:###################################
INFO:lib389:###
INFO:lib389:### Check that after stop
INFO:lib389:### - dse.ldif contains DBLOCK_LDAP_UPDATE
INFO:lib389:### - guardian contains DBLOCK_LDAP_UPDATE
INFO:lib389:### Check that edit dse+restart
INFO:lib389:### - monitor contains DBLOCK_EDIT_UPDATE
INFO:lib389:### - configured contains DBLOCK_EDIT_UPDATE
INFO:lib389:### Check that after stop
INFO:lib389:### - dse.ldif contains DBLOCK_EDIT_UPDATE
INFO:lib389:### - guardian contains DBLOCK_EDIT_UPDATE
INFO:lib389:###
INFO:lib389:###################################
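The edit-dse step amounts to rewriting the nsslapd-db-locks line while the server is down. A rough sketch, assuming the default dse.ldif location for this instance; 40000 matches the value printed in the stdout above.

# Assumed path for the standalone_1 instance; adjust to the actual layout.
DSE = '/etc/dirsrv/slapd-standalone_1/dse.ldif'

with open(DSE) as f:
    lines = f.readlines()

with open(DSE, 'w') as f:
    for line in lines:
        if line.lower().startswith('nsslapd-db-locks:'):
            f.write('nsslapd-db-locks: 40000\n')     # DBLOCK_EDIT_UPDATE value
        else:
            f.write(line)
# After restarting, cn=config, the monitor and dse.ldif should all agree on
# the edited value, which is what the checks listed above verify.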
Passed tickets/ticket48906_test.py::test_ticket48906_dblock_robust 1.46
----------------------------- Captured stdout call -----------------------------
line locks:40000 expected_value 40000 value 40000
----------------------------- Captured stderr call -----------------------------
INFO:lib389:###################################
INFO:lib389:###
INFO:lib389:### Check that the following values are rejected
INFO:lib389:### - negative value
INFO:lib389:### - insufficient value
INFO:lib389:### - invalid value
INFO:lib389:### Check that minimum value is accepted
INFO:lib389:###
INFO:lib389:###################################
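A sketch of those rejection checks with plain python-ldap; the sample values and the UNWILLING_TO_PERFORM result code are assumptions about how the server refuses them, not something shown in this report.

import ldap

conn = ldap.initialize('ldap://localhost:389')      # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')

config_dn = 'cn=config,cn=ldbm database,cn=plugins,cn=config'
for bad in ('-20000', '500', 'dummy'):               # negative, too small, not a number
    try:
        conn.modify_s(config_dn, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', bad)])
        raise AssertionError('value %r should have been rejected' % bad)
    except ldap.UNWILLING_TO_PERFORM:
        pass
# The minimum value itself is expected to be accepted.
conn.modify_s(config_dn, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', '10000')])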
Passed tickets/ticket48961_test.py::test_ticket48961_storagescheme 0.03
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48961_test:SSHA512
INFO:tests.tickets.ticket48961_test:Test PASSED
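A minimal sketch of the storage-scheme check with plain python-ldap; the connection details are placeholders, and SSHA512 is the scheme reported in the log above.

import ldap

conn = ldap.initialize('ldap://localhost:389')      # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')

# Set the global scheme on cn=config and read it back.
conn.modify_s('cn=config',
              [(ldap.MOD_REPLACE, 'passwordStorageScheme', 'SSHA512')])
entry = conn.search_s('cn=config', ldap.SCOPE_BASE, '(objectClass=*)',
                      ['passwordStorageScheme'])
assert entry[0][1]['passwordStorageScheme'] == ['SSHA512']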
Passed tickets/ticket48961_test.py::test_ticket48961_deleteall 9.17
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket48961_test:Reseting passwordMinCategories INFO:tests.tickets.ticket48961_test: --> ['3'] INFO:tests.tickets.ticket48961_test:Reset passwordMinCategories to 3 INFO:tests.tickets.ticket48961_test:Reseting passwordMinLowers INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMinLowers to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logrotationsynchour INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logrotationsynchour to 0 INFO:tests.tickets.ticket48961_test:Reseting passwordMinSpecials INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMinSpecials to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-nagle INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-nagle to on INFO:tests.tickets.ticket48961_test:Reseting passwordUnlock INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset passwordUnlock to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-maxlogsize INFO:tests.tickets.ticket48961_test: --> ['100'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-maxlogsize to 100 INFO:tests.tickets.ticket48961_test:Reseting passwordWarning INFO:tests.tickets.ticket48961_test: --> ['86400'] INFO:tests.tickets.ticket48961_test:Reset passwordWarning to 86400 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-entryusn-global INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-entryusn-global to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logminfreediskspace INFO:tests.tickets.ticket48961_test: --> ['5'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logminfreediskspace to 5 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logrotationsyncmin INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logrotationsyncmin to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-reservedescriptors INFO:tests.tickets.ticket48961_test: --> ['64'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-reservedescriptors to 64 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logmaxdiskspace INFO:tests.tickets.ticket48961_test: --> ['500'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logmaxdiskspace to 500 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-enquote-sup-oc INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-enquote-sup-oc to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-readonly INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-readonly to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-syntaxcheck INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-syntaxcheck to on INFO:tests.tickets.ticket48961_test:Reseting passwordInHistory INFO:tests.tickets.ticket48961_test: --> ['6'] INFO:tests.tickets.ticket48961_test:Reset passwordInHistory to 6 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-maxbersize INFO:tests.tickets.ticket48961_test: --> ['2097152'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-maxbersize to 2097152 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logbuffering INFO:tests.tickets.ticket48961_test: --> ['on'] 
INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logbuffering to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-disk-monitoring-logging-critical INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-disk-monitoring-logging-critical to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logrotationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logrotationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-disk-monitoring-threshold INFO:tests.tickets.ticket48961_test: --> ['2097152'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-disk-monitoring-threshold to 2097152 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-dn-validate-strict INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-dn-validate-strict to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ds4-compatible-schema INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ds4-compatible-schema to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ndn-cache-max-size INFO:tests.tickets.ticket48961_test: --> ['20971520'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ndn-cache-max-size to 20971520 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-timelimit INFO:tests.tickets.ticket48961_test: --> ['3600'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-timelimit to 3600 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-disk-monitoring INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-disk-monitoring to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-moddn-aci INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-moddn-aci to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-pwpolicy-inherit-global INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-pwpolicy-inherit-global to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-validate-cert INFO:tests.tickets.ticket48961_test: --> ['warn'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-validate-cert to warn INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logrotationsync-enabled INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logrotationsync-enabled to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logrotationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logrotationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logrotationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logrotationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting passwordMin8bit INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMin8bit to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-cn-uses-dn-syntax-in-dns INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-cn-uses-dn-syntax-in-dns to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logrotationtimeunit INFO:tests.tickets.ticket48961_test: --> ['week'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logrotationtimeunit to week 
INFO:tests.tickets.ticket48961_test:Reseting nsslapd-disk-monitoring-grace-period INFO:tests.tickets.ticket48961_test: --> ['60'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-disk-monitoring-grace-period to 60 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-maxdescriptors INFO:tests.tickets.ticket48961_test: --> ['1024'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-maxdescriptors to 1024 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-allow-hashed-passwords INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-allow-hashed-passwords to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logrotationtimeunit INFO:tests.tickets.ticket48961_test: --> ['week'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logrotationtimeunit to week INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ssl-check-hostname INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ssl-check-hostname to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logging-enabled INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logging-enabled to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logrotationsync-enabled INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logrotationsync-enabled to off INFO:tests.tickets.ticket48961_test:Reseting passwordLockoutDuration INFO:tests.tickets.ticket48961_test: --> ['3600'] INFO:tests.tickets.ticket48961_test:Reset passwordLockoutDuration to 3600 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logexpirationtimeunit INFO:tests.tickets.ticket48961_test: --> ['month'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logexpirationtimeunit to month INFO:tests.tickets.ticket48961_test:Reseting passwordSendExpiringTime INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset passwordSendExpiringTime to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapiautobind INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapiautobind to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-extract-pemfiles INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-extract-pemfiles to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-maxthreadsperconn INFO:tests.tickets.ticket48961_test: --> ['5'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-maxthreadsperconn to 5 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logrotationsyncmin INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logrotationsyncmin to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapigidnumbertype INFO:tests.tickets.ticket48961_test: --> ['gidNumber'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapigidnumbertype to gidNumber INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logrotationtimeunit INFO:tests.tickets.ticket48961_test: --> ['day'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logrotationtimeunit to day INFO:tests.tickets.ticket48961_test:Reseting nsslapd-dynamic-plugins INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-dynamic-plugins to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-csnlogging 
INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-csnlogging to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-counters INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-counters to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-svrtab INFO:tests.tickets.ticket48961_test: --> [''] INFO:tests.tickets.ticket48961_test:override --> Some bogus data INFO:tests.tickets.ticket48961_test:Reset nsslapd-svrtab to INFO:tests.tickets.ticket48961_test:Reseting nsslapd-allowed-sasl-mechanisms INFO:tests.tickets.ticket48961_test: --> [''] INFO:tests.tickets.ticket48961_test:override --> GSSAPI INFO:tests.tickets.ticket48961_test:Reset nsslapd-allowed-sasl-mechanisms to INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logexpirationtimeunit INFO:tests.tickets.ticket48961_test: --> ['month'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logexpirationtimeunit to month INFO:tests.tickets.ticket48961_test:Reseting nsslapd-minssf INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-minssf to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logrotationsync-enabled INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logrotationsync-enabled to off INFO:tests.tickets.ticket48961_test:Reseting passwordMinAlphas INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMinAlphas to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-maxlogsize INFO:tests.tickets.ticket48961_test: --> ['100'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-maxlogsize to 100 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-security INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-security to off INFO:tests.tickets.ticket48961_test:Reseting passwordCheckSyntax INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset passwordCheckSyntax to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-allow-unauthenticated-binds INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-allow-unauthenticated-binds to off INFO:tests.tickets.ticket48961_test:Reseting passwordMinUppers INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMinUppers to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logrotationsync-enabled INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logrotationsync-enabled to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapifilepath INFO:tests.tickets.ticket48961_test: --> ['/var/run/slapd-standalone_1.socket'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapifilepath to /var/run/ldapi INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logging-enabled INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logging-enabled to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logrotationsyncmin INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logrotationsyncmin to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-pagedsizelimit INFO:tests.tickets.ticket48961_test: --> ['0'] 
INFO:tests.tickets.ticket48961_test:Reset nsslapd-pagedsizelimit to 0 INFO:tests.tickets.ticket48961_test:Reseting passwordMaxAge INFO:tests.tickets.ticket48961_test: --> ['8640000'] INFO:tests.tickets.ticket48961_test:Reset passwordMaxAge to 8640000 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-global-backend-lock INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-global-backend-lock to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logexpirationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logexpirationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-listen-backlog-size INFO:tests.tickets.ticket48961_test: --> ['128'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-listen-backlog-size to 128 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-plugin-logging INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-plugin-logging to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesscontrol INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesscontrol to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-mode INFO:tests.tickets.ticket48961_test: --> ['600'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-mode to 600 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-anonlimitsdn INFO:tests.tickets.ticket48961_test: --> [''] INFO:tests.tickets.ticket48961_test:Reset nsslapd-anonlimitsdn to INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logging-enabled INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logging-enabled to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logexpirationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logexpirationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting passwordMustChange INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset passwordMustChange to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logminfreediskspace INFO:tests.tickets.ticket48961_test: --> ['5'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logminfreediskspace to 5 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-logging-backend INFO:tests.tickets.ticket48961_test: --> ['dirsrv-log'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-logging-backend to dirsrv-log INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog INFO:tests.tickets.ticket48961_test: --> ['/var/log/dirsrv/slapd-standalone_1/audit'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog to INFO:tests.tickets.ticket48961_test:Reseting nsslapd-schema-ignore-trailing-spaces INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-schema-ignore-trailing-spaces to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logmaxdiskspace INFO:tests.tickets.ticket48961_test: --> ['100'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logmaxdiskspace to 100 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapimaprootdn INFO:tests.tickets.ticket48961_test: --> ['cn=Directory Manager'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapimaprootdn to cn=Directory Manager INFO:tests.tickets.ticket48961_test:Reseting 
nsslapd-auditfaillog-logging-enabled INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logging-enabled to off INFO:tests.tickets.ticket48961_test:Reseting passwordMaxFailure INFO:tests.tickets.ticket48961_test: --> ['3'] INFO:tests.tickets.ticket48961_test:Reset passwordMaxFailure to 3 INFO:tests.tickets.ticket48961_test:Reseting passwordMaxRepeats INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMaxRepeats to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-enable-nunc-stans INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-enable-nunc-stans to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-require-secure-binds INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-require-secure-binds to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-groupevalnestlevel INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-groupevalnestlevel to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-idletimeout INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-idletimeout to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logrotationtimeunit INFO:tests.tickets.ticket48961_test: --> ['week'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logrotationtimeunit to week INFO:tests.tickets.ticket48961_test:Reseting nsslapd-snmp-index INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-snmp-index to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapimaptoentries INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapimaptoentries to off INFO:tests.tickets.ticket48961_test:Reseting passwordChange INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset passwordChange to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-entryusn-import-initval INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-entryusn-import-initval to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logminfreediskspace INFO:tests.tickets.ticket48961_test: --> ['5'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logminfreediskspace to 5 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-force-sasl-external INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-force-sasl-external to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logging-hide-unhashed-pw INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logging-hide-unhashed-pw to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-maxlogsperdir INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-maxlogsperdir to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-listenhost INFO:tests.tickets.ticket48961_test: --> [''] INFO:tests.tickets.ticket48961_test:override --> localhost INFO:tests.tickets.ticket48961_test:Reset nsslapd-listenhost to INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-mode INFO:tests.tickets.ticket48961_test: --> ['600'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-mode to 600 
INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logrotationsynchour INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logrotationsynchour to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-sasl-mapping-fallback INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-sasl-mapping-fallback to off INFO:tests.tickets.ticket48961_test:Reseting passwordStorageScheme INFO:tests.tickets.ticket48961_test: --> ['SSHA512'] INFO:tests.tickets.ticket48961_test:Reset passwordStorageScheme to SSHA512 INFO:tests.tickets.ticket48961_test:Reseting passwordLegacyPolicy INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset passwordLegacyPolicy to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-enable-turbo-mode INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-enable-turbo-mode to on INFO:tests.tickets.ticket48961_test:Reseting passwordMinTokenLength INFO:tests.tickets.ticket48961_test: --> ['3'] INFO:tests.tickets.ticket48961_test:Reset passwordMinTokenLength to 3 INFO:tests.tickets.ticket48961_test:Reseting passwordResetFailureCount INFO:tests.tickets.ticket48961_test: --> ['600'] INFO:tests.tickets.ticket48961_test:Reset passwordResetFailureCount to 600 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-mode INFO:tests.tickets.ticket48961_test: --> ['600'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-mode to 600 INFO:tests.tickets.ticket48961_test:Reseting passwordLockout INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset passwordLockout to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logmaxdiskspace INFO:tests.tickets.ticket48961_test: --> ['100'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logmaxdiskspace to 100 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-level INFO:tests.tickets.ticket48961_test: --> ['256'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-level to 256 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-return-exact-case INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-return-exact-case to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-maxsasliosize INFO:tests.tickets.ticket48961_test: --> ['2097152'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-maxsasliosize to 2097152 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logexpirationtimeunit INFO:tests.tickets.ticket48961_test: --> ['month'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logexpirationtimeunit to month INFO:tests.tickets.ticket48961_test:Reseting nsslapd-rewrite-rfc1274 INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-rewrite-rfc1274 to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-rootpwstoragescheme INFO:tests.tickets.ticket48961_test: --> ['SSHA512'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-rootpwstoragescheme to SSHA512 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditlog-logexpirationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditlog-logexpirationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-allow-anonymous-access INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset 
nsslapd-allow-anonymous-access to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-maxlogsperdir INFO:tests.tickets.ticket48961_test: --> ['10'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-maxlogsperdir to 10 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-threadnumber INFO:tests.tickets.ticket48961_test: --> ['16'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-threadnumber to 16 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-schemamod INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-schemamod to on INFO:tests.tickets.ticket48961_test:Reseting passwordGraceLimit INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordGraceLimit to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-search-return-original-type-switch INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-search-return-original-type-switch to off INFO:tests.tickets.ticket48961_test:Reseting passwordMinLength INFO:tests.tickets.ticket48961_test: --> ['8'] INFO:tests.tickets.ticket48961_test:Reset passwordMinLength to 8 INFO:tests.tickets.ticket48961_test:Reseting passwordTrackUpdateTime INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset passwordTrackUpdateTime to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapiuidnumbertype INFO:tests.tickets.ticket48961_test: --> ['uidNumber'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapiuidnumbertype to uidNumber INFO:tests.tickets.ticket48961_test:Reseting passwordMinDigits INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMinDigits to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-logging-hr-timestamps-enabled INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-logging-hr-timestamps-enabled to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ignore-time-skew INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ignore-time-skew to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapilisten INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapilisten to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logmaxdiskspace INFO:tests.tickets.ticket48961_test: --> ['100'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logmaxdiskspace to 100 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-lastmod INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-lastmod to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-max-filter-nest-level INFO:tests.tickets.ticket48961_test: --> ['40'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-max-filter-nest-level to 40 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-securelistenhost INFO:tests.tickets.ticket48961_test: --> [''] INFO:tests.tickets.ticket48961_test:override --> localhost INFO:tests.tickets.ticket48961_test:Reset nsslapd-securelistenhost to INFO:tests.tickets.ticket48961_test:Reseting nsslapd-maxsimplepaged-per-conn INFO:tests.tickets.ticket48961_test: --> ['-1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-maxsimplepaged-per-conn to -1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-result-tweak INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset 
nsslapd-result-tweak to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logexpirationtimeunit INFO:tests.tickets.ticket48961_test: --> ['month'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logexpirationtimeunit to month INFO:tests.tickets.ticket48961_test:Reseting nsslapd-schemacheck INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-schemacheck to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-pwpolicy-local INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-pwpolicy-local to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-maxlogsize INFO:tests.tickets.ticket48961_test: --> ['100'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-maxlogsize to 100 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ldapientrysearchbase INFO:tests.tickets.ticket48961_test: --> ['dc=example,dc=com'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ldapientrysearchbase to dc=example,dc=com INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logexpirationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logexpirationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-localssf INFO:tests.tickets.ticket48961_test: --> ['71'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-localssf to 71 INFO:tests.tickets.ticket48961_test:Reseting passwordIsGlobalPolicy INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset passwordIsGlobalPolicy to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-sizelimit INFO:tests.tickets.ticket48961_test: --> ['2000'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-sizelimit to 2000 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-minssf-exclude-rootdse INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-minssf-exclude-rootdse to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logrotationsynchour INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logrotationsynchour to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ignore-virtual-attrs INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ignore-virtual-attrs to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ndn-cache-enabled INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ndn-cache-enabled to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logrotationtime INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logrotationtime to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-maxlogsperdir INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-maxlogsperdir to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-unhashed-pw-switch INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-unhashed-pw-switch to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-sasl-max-buffer-size INFO:tests.tickets.ticket48961_test: --> ['2097152'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-sasl-max-buffer-size to 2097152 INFO:tests.tickets.ticket48961_test:Reseting passwordExp INFO:tests.tickets.ticket48961_test: --> ['off'] 
INFO:tests.tickets.ticket48961_test:Reset passwordExp to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-maxlogsize INFO:tests.tickets.ticket48961_test: --> ['100'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-maxlogsize to 100 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-maxlogsperdir INFO:tests.tickets.ticket48961_test: --> ['1'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-maxlogsperdir to 1 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-mode INFO:tests.tickets.ticket48961_test: --> ['600'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-mode to 600 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-logrotationsynchour INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-logrotationsynchour to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-outbound-ldap-io-timeout INFO:tests.tickets.ticket48961_test: --> ['300000'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-outbound-ldap-io-timeout to 300000 INFO:tests.tickets.ticket48961_test:Reseting passwordMinAge INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset passwordMinAge to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logrotationsyncmin INFO:tests.tickets.ticket48961_test: --> ['0'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logrotationsyncmin to 0 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-schemareplace INFO:tests.tickets.ticket48961_test: --> ['replication-only'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-schemareplace to replication-only INFO:tests.tickets.ticket48961_test:Reseting nsslapd-plugin-binddn-tracking INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-plugin-binddn-tracking to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-errorlog-level INFO:tests.tickets.ticket48961_test: --> ['266338304'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-errorlog-level to 266338304 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-auditfaillog-logging-hide-unhashed-pw INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-auditfaillog-logging-hide-unhashed-pw to on INFO:tests.tickets.ticket48961_test:Reseting nsslapd-syntaxlogging INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-syntaxlogging to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-ioblocktimeout INFO:tests.tickets.ticket48961_test: --> ['1800000'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-ioblocktimeout to 1800000 INFO:tests.tickets.ticket48961_test:Reseting nsslapd-SSLclientAuth INFO:tests.tickets.ticket48961_test: --> ['allowed'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-SSLclientAuth to allowed INFO:tests.tickets.ticket48961_test:Reseting nsslapd-attribute-name-exceptions INFO:tests.tickets.ticket48961_test: --> ['off'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-attribute-name-exceptions to off INFO:tests.tickets.ticket48961_test:Reseting nsslapd-rootdn INFO:tests.tickets.ticket48961_test: --> ['cn=Directory Manager'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-rootdn to cn=Directory Manager INFO:tests.tickets.ticket48961_test:Reseting nsslapd-accesslog-logminfreediskspace INFO:tests.tickets.ticket48961_test: --> ['5'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-accesslog-logminfreediskspace to 5 
INFO:tests.tickets.ticket48961_test:Reseting nsslapd-connection-nocanon INFO:tests.tickets.ticket48961_test: --> ['on'] INFO:tests.tickets.ticket48961_test:Reset nsslapd-connection-nocanon to on INFO:tests.tickets.ticket48961_test:Removing cn INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-accesslog INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-accesslog-list INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-allowed-to-delete-attrs INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default! INFO:tests.tickets.ticket48961_test:Removing nsslapd-auditfaillog-list INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-auditlog INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-auditlog-list INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-backendconfig INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default! INFO:tests.tickets.ticket48961_test:Removing nsslapd-bakdir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-betype INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default! INFO:tests.tickets.ticket48961_test:Removing nsslapd-certdir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-certmap-basedn INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-config INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-connection-buffer INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-conntablesize INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-defaultnamingcontext INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-errorlog INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-errorlog-list INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-hash-filters INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-instancedir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-ldifdir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-localhost INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-localuser INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-lockdir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-malloc-mmap-threshold INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-malloc-mxfast INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-malloc-trim-threshold 
INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-plugin INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default! INFO:tests.tickets.ticket48961_test:Removing nsslapd-port INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-privatenamespaces INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default! INFO:tests.tickets.ticket48961_test:Removing nsslapd-referralmode INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-requiresrestart INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default! INFO:tests.tickets.ticket48961_test:Removing nsslapd-rootpw INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-rundir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-saslpath INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-schemadir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-secureport INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-tmpdir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing nsslapd-versionstring INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default! INFO:tests.tickets.ticket48961_test:Removing nsslapd-workingdir INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing objectclass INFO:tests.tickets.ticket48961_test:Change was rejected INFO:tests.tickets.ticket48961_test:Removing passwordadmindn INFO:tests.tickets.ticket48961_test:This attribute isn't part of cn=config, so is already default!
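The deleteall pass boils down to deleting each cn=config attribute and reading back whatever default the server restores. A sketch of that pattern, assuming plain python-ldap and treating any LDAP error as the "Change was rejected" case shown above.

import ldap

conn = ldap.initialize('ldap://localhost:389')      # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')

def reset_to_default(attr):
    """Delete attr from cn=config and return the value the server falls back to."""
    try:
        conn.modify_s('cn=config', [(ldap.MOD_DELETE, attr, None)])
    except ldap.LDAPError:
        return None                                  # rejected, as logged above
    entry = conn.search_s('cn=config', ldap.SCOPE_BASE, '(objectClass=*)', [attr])
    return entry[0][1].get(attr)

# e.g. reset_to_default('passwordMinCategories') should come back as ['3'],
# matching the first reset reported in the captured log.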
Passed tickets/ticket49104_test.py::test_ticket49104_setup 80.81
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389:Import task import_03152017_044313 for file /var/lib/dirsrv/slapd-standalone_1/ldif/49104.ldif completed successfully
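For context, an online import like the one logged above is normally driven by adding a task entry under cn=import,cn=tasks,cn=config. A sketch follows; the attribute names (nsFilename, nsInstance) reflect my understanding of the task interface rather than anything shown directly in this report.

import time
import ldap
from ldap.modlist import addModlist

conn = ldap.initialize('ldap://localhost:389')      # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')

task_cn = 'import_%s' % time.strftime('%m%d%Y_%H%M%S')
task = {
    'objectClass': ['top', 'extensibleObject'],
    'cn': [task_cn],
    'nsFilename': ['/var/lib/dirsrv/slapd-standalone_1/ldif/49104.ldif'],
    'nsInstance': ['userRoot'],                      # backend to import into (assumed)
}
conn.add_s('cn=%s,cn=import,cn=tasks,cn=config' % task_cn, addModlist(task))
# The server runs the import asynchronously and records the result on the task
# entry; the "completed successfully" line above is lib389 polling that status.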
Passed tickets/ticket49104_test.py::test_ticket49104 64.09
----------------------------- Captured stderr call -----------------------------
INFO:tests.tickets.ticket49104_test:Test ticket 49104 -- dbscan crashes by memory corruption
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 20 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 21 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 22 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 23 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 24 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 25 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 26 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 27 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 28 -R
INFO:tests.tickets.ticket49104_test:Running script: valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=/tmp/val49104.out /usr/bin/dbscan-bin -f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 29 -R
INFO:tests.tickets.ticket49104_test:ticket 49104 - PASSED
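A sketch of one valgrind/dbscan round from the list above, plus a check of the log file; the "Invalid read/write" strings are an assumption about what a memory-corruption hit would look like in valgrind output.

import subprocess

cmd = ('valgrind --tool=memcheck --leak-check=yes --num-callers=40 '
       '--log-file=/tmp/val49104.out /usr/bin/dbscan-bin '
       '-f /var/lib/dirsrv/slapd-standalone_1/db/userRoot/id2entry.db -t 20 -R')
subprocess.call(cmd, shell=True)

with open('/tmp/val49104.out') as f:
    report = f.read()
# dbscan must not trip valgrind; any invalid access would point at the
# memory corruption this ticket is about.
assert 'Invalid read' not in report
assert 'Invalid write' not in report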
Passed tickets/ticket49122_test.py::test_ticket49122 2.20
---------------------------- Captured stdout setup -----------------------------
OK group dirsrv exists
OK user dirsrv exists
----------------------------- Captured stderr call -----------------------------
INFO:lib389:Test Passed