Monday, December 30, 2013

Data Guard configuration for an Oracle RAC database: two-node primary (DC) and single-node physical standby (DR):

Copy the password file from the primary (dcpdb1) to the standby (drpdb1):
###############################

######## dcpdb1, user root: #################

# whoami
root
# pwd
/u02/app/oracle/product/11.2.0/dbhome_1/dbs
#

# scp orapwdcpdb1 drpdb1:/u02/app/oracle/product/11.2.0/dbhome_1/dbs
Warning: the RSA host key for 'drpdb1' differs from the key for the IP address '172.31.200.1'
Offending key for IP in /.ssh/known_hosts:6
Matching host key in /.ssh/known_hosts:9
Are you sure you want to continue connecting (yes/no)? yes
Password:
orapwdcpdb1                                    100% 3072     3.0KB/s   3.0KB/s   00:00  
#

###### drpdb1 user root: #########
# cd /u02/app/oracle/product/11.2.0/dbhome_1/dbs
# pwd
/u02/app/oracle/product/11.2.0/dbhome_1/dbs

# ls
hc_drpdb.dat   init.ora       orapwdcpdb1
initdrpdb.ora  lkDRPDB        peshm_drpdb_0

# mv orapwdcpdb1 orapwdrpdb

# ls
hc_drpdb.dat   init.ora       orapwdrpdb
initdrpdb.ora  lkDRPDB        peshm_drpdb_0

******************************************************************
drpdb1: as grid user:
******************************************************************
alter system set local_listener='(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=drpdb1-vip.nblitd.com)(PORT=1521))))' scope=both sid='+ASM1';
alter system register;


******************************************************************
drpdb1 as oracle user:
******************************************************************
alter system set local_listener='(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=drpdb1-vip.nblitd.com)(PORT=1521))))' scope=BOTH SID='drpdb1';
alter system register;

alter system set local_listener='(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=drpdb1-vip.nblitd.com)(PORT=1521))(ADDRESS=(PROTOCOL=TCP)(HOST=drpdb1-vip.nblitd.com)(PORT=1971))))' scope=BOTH SID='drpdb1';
alter system register;
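
A quick check that the parameter took effect and that the instance re-registered (not part of the original transcript; the default listener name is assumed):

SQL> show parameter local_listener

$ lsnrctl services | grep -i drpdb        # as grid on drpdb1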

******************************************************************
dcpdb1: as grid user:
******************************************************************

alter system set local_listener='(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=dcpdb1-vip.nblitd.com)(PORT=1521))))' scope=both sid='+ASM1';
alter system register;

******************************************************************
dcpdb2: as grid user:
******************************************************************

alter system set local_listener='(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=dcpdb2-vip.nblitd.com)(PORT=1521))))' scope=both sid='+ASM2';
alter system register;


******************************************************************
dcpdb2: as oracle user:
******************************************************************
alter system set local_listener='(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=dcpdb2-vip.nblitd.com)(PORT=1521))(ADDRESS=(PROTOCOL=TCP)(HOST=dcpdb2-vip.nblitd.com)(PORT=1971))))' scope=BOTH SID='dcpdb2';
alter system register;


******************************************************************
dcpdb1 as oracle user:
******************************************************************
alter system set local_listener='(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=dcpdb1-vip.nblitd.com)(PORT=1521))(ADDRESS=(PROTOCOL=TCP)(HOST=dcpdb1-vip.nblitd.com)(PORT=1590))))' scope=BOTH SID='dcpdb1';
alter system register;


******************************************************************
add logfile with thread 2:
******************************************************************
SQL> ALTER DATABASE ADD LOGFILE THREAD 2
     GROUP 5 ('+DATA','+DATA') SIZE 200M;

SQL> ALTER DATABASE ADD LOGFILE THREAD 2
     GROUP 6 ('+DATA','+DATA') SIZE 200M;
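
A verification query (not in the original notes) to confirm both threads now have their online redo log groups:

SQL> select thread#, group#, members, bytes/1024/1024 as mb, status
     from v$log
     order by thread#, group#;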


******************************************************************
create tablespace:
******************************************************************
SQL> CREATE TABLESPACE  T24MASTERDATA
     DATAFILE '+DATA' SIZE 10G;


SQL> CREATE TABLESPACE  T24MASTERINDEX
     DATAFILE '+DATA' SIZE 3G;




SQL> alter database force logging;

Database altered.



******************************************************************
add standby logfile with thread 1:
******************************************************************
SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 1
  2  GROUP 7 ('+DATA','+DATA') size 200M;

Database altered.

SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 1
  2  GROUP 8 ('+DATA','+DATA') size 200M;

Database altered.

SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 1
  2  GROUP 9 ('+DATA','+DATA') size 200M;

Database altered.

SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 1
  2  GROUP 10 ('+DATA','+DATA') size 200M;

Database altered.

******************************************************************
add standby logfile with thread 2:
******************************************************************
SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 2
  2  GROUP 11 ('+DATA','+DATA') size 200M;

Database altered.

SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 2
  2  GROUP 12 ('+DATA','+DATA') size 200M;

Database altered.

SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 2
  2  GROUP 13 ('+DATA','+DATA') size 200M;

Database altered.

SQL> ALTER DATABASE ADD STANDBY LOGFILE THREAD 2
  2  GROUP 14 ('+DATA','+DATA') size 200M;

Database altered.
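
The counts above follow the usual Data Guard guideline of (online log groups per thread + 1) standby groups per thread, sized the same as the online logs. A query to list what was created (not in the original notes):

SQL> select thread#, group#, bytes/1024/1024 as mb
     from v$standby_log
     order by thread#, group#;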


SQL> alter system set log_archive_config='DG_CONFIG=(dcpdb,drpdb)' scope=both sid='*';

System altered.

SQL> alter system set LOG_ARCHIVE_DEST_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=dcpdb' scope=both sid='*';

System altered.

SQL> alter system set LOG_ARCHIVE_DEST_2='SERVICE=drpdb SYNC AFFIRM VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=drpdb' scope=both sid='*';

System altered.

SQL> alter system set log_archive_dest_state_1=enable scope=both;

System altered.

SQL> alter system set log_archive_dest_state_2=enable scope=both;

System altered.

SQL> alter system set log_archive_format='%t_%s_%r.arc' scope=spfile sid='*';

System altered.

SQL> alter system set log_archive_max_processes=5 scope=both sid='*';

System altered.


SQL> alter system set fal_server=drpdb scope=both sid='*';

System altered.

SQL> alter system set fal_client=dcpdb scope=both sid='*';

System altered.


SQL> alter system set db_file_name_convert='drpdb','dcpdb' scope=spfile sid='*';

System altered.



SQL> alter system set log_file_name_convert='drpdb','dcpdb' scope=spfile sid='*';

System altered.


SQL> alter system set db_unique_name='dcpdb' scope=spfile sid='*';

System altered.


SQL> alter system set standby_file_management=AUTO scope=both sid='*';

System altered.
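
Before raising the protection mode, it is worth confirming that both destinations are valid and error-free on the primary (a check not shown in the original transcript):

SQL> select dest_id, dest_name, status, error
     from v$archive_dest
     where dest_id in (1,2);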


SQL> alter database set standby database to maximize availability;

Database altered.

drpdb1: user oracle
** Create a listener with the following listener.ora file on the standby server:

LISTENERDB =
  (DESCRIPTION_LIST =
    (DESCRIPTION =
      (ADDRESS = (PROTOCOL = TCP)(HOST = drpdb1)(PORT = xxxx))
      (ADDRESS = (PROTOCOL = IPC)(KEY = EXTPROCxxxx))
    )
  )

SID_LIST_LISTENERDB =
  (SID_LIST =
    (SID_DESC =
      (ORACLE_HOME = /u02/app/oracle/product/11.2.0/dbhome_1)
      (SID_NAME = drpdb)
      (GLOBAL_DBNAME = drpdb.nblitd.com)
    )
  )

ADR_BASE_LISTENERDB = /u02/app/oracle
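
Both sites also need TNS aliases for dcpdb and drpdb, since LOG_ARCHIVE_DEST_2, FAL_SERVER/FAL_CLIENT and the RMAN duplicate below all connect through them. The entries are not shown in the original notes; a sketch only, with hosts and ports to be adjusted to the environment:

dcpdb =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = dcpdb1-vip.nblitd.com)(PORT = 1521))
    (ADDRESS = (PROTOCOL = TCP)(HOST = dcpdb2-vip.nblitd.com)(PORT = 1521))
    (CONNECT_DATA = (SERVICE_NAME = dcpdb.nblitd.com))
  )

drpdb =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = drpdb1)(PORT = xxxx))
    (CONNECT_DATA = (SERVICE_NAME = drpdb.nblitd.com))
  )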


N.B.: Restart all servers, log in to dcpdb1 again as oracle, and then:


SQL> create pfile from spfile;

File created.

Copy the pfile to another location, modify the parameters, and then place the modified pfile in the dbs directory on drpdb1 (DR).

Modified initdcpdb1.ora file (to be used as the standby pfile):

*.audit_file_dest='/u02/app/oracle/admin/drpdb/adump'
*.audit_trail='db'
*.compatible='11.2.0.0.0'
*.control_files='+DATA/drpdb/controlfile/current.261.809456283','+DATA/drpdb/controlfile/current.260.809456283'
*.db_block_size=8192
*.db_create_file_dest='+DATA'
*.db_create_online_log_dest_1='+DATA'
*.db_create_online_log_dest_2='+DATA'
*.db_domain='nblitd.com'
*.db_file_name_convert='dcpdb','drpdb'
*.db_name='dcpdb'
*.db_recovery_file_dest='+FRA'
*.db_recovery_file_dest_size=535797170176
*.db_unique_name='drpdb'
*.diagnostic_dest='/u02/app/oracle'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=dcpdbXDB)'
*.fal_client='drpdb'
*.fal_server='dcpdb'
dcpdb2.instance_number=2
dcpdb1.instance_number=1
*.job_queue_processes=1000
#*.local_listener='(ADDRESS = (PROTOCOL=TCP)(HOST =drpdb1.nblitd.com)(PORT=1590))'
*.log_archive_config='DG_CONFIG=(dcpdb,drpdb)'
*.log_archive_dest_1='SERVICE=dcpdb SYNC AFFIRM VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=dcpdb'
*.log_archive_dest_2='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=drpdb'
*.log_archive_dest_state_1='ENABLE'
*.log_archive_dest_state_2='ENABLE'
*.log_archive_format='%t_%s_%r.arc'
*.log_archive_max_processes=10
*.log_file_name_convert='dcpdb','drpdb'
*.memory_target=15440281600
*.open_cursors=5000
*.processes=5000
*.remote_login_passwordfile='exclusive'
*.sessions=5505
*.standby_file_management='AUTO'
dcpdb2.thread=2
dcpdb1.thread=1
dcpdb1.undo_tablespace='UNDOTBS1'
dcpdb2.undo_tablespace='UNDOTBS2'   # comment this line out for a single-instance DR
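
One step not spelled out above: the audit_file_dest path referenced in this pfile must exist on drpdb1 before the instance is started, or the nomount startup will typically fail with ORA-09925. As root (ownership assumed to be oracle:oinstall, as elsewhere in this setup):

# mkdir -p /u02/app/oracle/admin/drpdb/adump
# chown -R oracle:oinstall /u02/app/oracle/admin/drpdb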



In DRPDB1 as root:

# cd /u02/app/oracle/product/11.2.0/dbhome_1/dbs/

# ls
hc_drpdb.dat    init.ora        orapwdrpdb
initdcpdb1.ora  lkDRPDB         peshm_drpdb_0


# mv initdcpdb1.ora initdrpdb.ora
#


N.B.: On DRPDB1, as user oracle:

$ export ORACLE_BASE=/u02/app/oracle
$ export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1
$ export GRID_HOME=/u01/app/11.2.0/grid
$ export ORACLE_SID=drpdb
$ cd $ORACLE_HOME/bin


$ ./sqlplus "/ as sysdba"

SQL*Plus: Release 11.2.0.1.0 Production on Wed Mar 13 15:53:13 2013

Copyright (c) 1982, 2009, Oracle.  All rights reserved.

Connected to an idle instance.

SQL> startup nomount pfile='/u02/app/oracle/product/11.2.0/dbhome_1/dbs/initdrpdb.ora'
ORACLE instance started.

Total System Global Area 1.5400E+10 bytes
Fixed Size                  2184872 bytes
Variable Size            7683967320 bytes
Database Buffers         7683964928 bytes
Redo Buffers               29409280 bytes

SQL>

SQL> select * from v$pwfile_users;

USERNAME                       SYSDB SYSOP SYSAS
------------------------------ ----- ----- -----
SYS                            TRUE  TRUE  FALSE


In DCPDB1 login as oracle:

export ORACLE_BASE=/u02/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1
export GRID_HOME=/u01/app/11.2.0/grid
export ORACLE_SID=dcpdb1
cd $ORACLE_HOME/bin

$ ./rman target sys/HPinvent123nbl@dcpdb AUXILIARY sys/HPinvent123nbl@drpdb


Recovery Manager: Release 11.2.0.1.0 - Production on Thu Mar 14 13:33:16 2013

Copyright (c) 1982, 2009, Oracle and/or its affiliates.  All rights reserved.

connected to target database: DCPDB (DBID=322322458)
connected to auxiliary database: DCPDB (not mounted)

RMAN>


RMAN> duplicate target database for standby from active database;

Starting Duplicate Db at 14-MAR-13
using target database control file instead of recovery catalog
allocated channel: ORA_AUX_DISK_1
channel ORA_AUX_DISK_1: SID=5649 device type=DISK

contents of Memory Script:
{
   backup as copy reuse
   targetfile  '/u02/app/oracle/product/11.2.0/dbhome_1/dbs/orapwdcpdb1' auxiliary format
 '/u02/app/oracle/product/11.2.0/dbhome_1/dbs/orapwdrpdb'   ;
}
executing Memory Script

Starting backup at 14-MAR-13
allocated channel: ORA_DISK_1
channel ORA_DISK_1: SID=5655 instance=dcpdb1 device type=DISK
Finished backup at 14-MAR-13

contents of Memory Script:
{
   backup as copy current controlfile for standby auxiliary format  '+DATA/drpdb/controlfile/current.256.809999501';
   restore clone controlfile to  '+DATA/drpdb/controlfile/current.257.809999501' from
 '+DATA/drpdb/controlfile/current.256.809999501';
   sql clone "create spfile from memory";
   shutdown clone immediate;
   startup clone nomount;
   sql clone "alter system set  control_files =
  ''+DATA/drpdb/controlfile/current.256.809999501'', ''+DATA/drpdb/controlfile/current.257.809999501'' comment=
 ''Set by RMAN'' scope=spfile";
   shutdown clone immediate;
   startup clone nomount;
}
executing Memory Script

Starting backup at 14-MAR-13
using channel ORA_DISK_1
channel ORA_DISK_1: starting datafile copy
copying standby control file
output file name=/u02/app/oracle/product/11.2.0/dbhome_1/dbs/snapcf_dcpdb1.f tag=TAG20130314T135301 RECID=1 STAMP=810049983
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:03
Finished backup at 14-MAR-13

Starting restore at 14-MAR-13
using channel ORA_AUX_DISK_1

channel ORA_AUX_DISK_1: copied control file copy
Finished restore at 14-MAR-13

sql statement: create spfile from memory

Oracle instance shut down

connected to auxiliary database (not started)
Oracle instance started

Total System Global Area   15399526400 bytes

Fixed Size                     2184872 bytes
Variable Size               7717521752 bytes
Database Buffers            7650410496 bytes
Redo Buffers                  29409280 bytes

sql statement: alter system set  control_files =   ''+DATA/drpdb/controlfile/current.256.809999501'', ''+DATA/drpdb/controlfile/current.257.809999501'' comment= ''Set by RMAN'' scope=spfile

Oracle instance shut down

connected to auxiliary database (not started)
Oracle instance started

Total System Global Area   15399526400 bytes

Fixed Size                     2184872 bytes
Variable Size               7717521752 bytes
Database Buffers            7650410496 bytes
Redo Buffers                  29409280 bytes

contents of Memory Script:
{
   sql clone 'alter database mount standby database';
}
executing Memory Script

sql statement: alter database mount standby database
RMAN-05529: WARNING: DB_FILE_NAME_CONVERT resulted in invalid ASM names; names changed to disk group only.

contents of Memory Script:
{
   set newname for tempfile  1 to
 "+data";
   switch clone tempfile all;
   set newname for datafile  1 to
 "+data";
   set newname for datafile  2 to
 "+data";
   set newname for datafile  3 to
 "+data";
   set newname for datafile  4 to
 "+data";
   set newname for datafile  5 to
 "+data";
   set newname for datafile  6 to
 "+data";
   set newname for datafile  7 to
 "+data";
   backup as copy reuse
   datafile  1 auxiliary format
 "+data"   datafile
 2 auxiliary format
 "+data"   datafile
 3 auxiliary format
 "+data"   datafile
 4 auxiliary format
 "+data"   datafile
 5 auxiliary format
 "+data"   datafile
 6 auxiliary format
 "+data"   datafile
 7 auxiliary format
 "+data"   ;
   sql 'alter system archive log current';
}
executing Memory Script

executing command: SET NEWNAME

renamed tempfile 1 to +data in control file

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

executing command: SET NEWNAME

Starting backup at 14-MAR-13
using channel ORA_DISK_1
channel ORA_DISK_1: starting datafile copy
input datafile file number=00006 name=+DATA/dcpdb/datafile/t24masterdata.277.809459733
output file name=+DATA/drpdb/datafile/t24masterdata.258.809999543 tag=TAG20130314T135343
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:15:26
channel ORA_DISK_1: starting datafile copy
input datafile file number=00007 name=+DATA/dcpdb/datafile/t24masterindex.278.809459821
output file name=+DATA/drpdb/datafile/t24masterindex.259.810000471 tag=TAG20130314T135343
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:04:45
channel ORA_DISK_1: starting datafile copy
input datafile file number=00002 name=+DATA/dcpdb/datafile/sysaux.257.809456141
output file name=+DATA/drpdb/datafile/sysaux.260.810000755 tag=TAG20130314T135343
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:01:25
channel ORA_DISK_1: starting datafile copy
input datafile file number=00001 name=+DATA/dcpdb/datafile/system.256.809456139
output file name=+DATA/drpdb/datafile/system.261.810000841 tag=TAG20130314T135343
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:01:05
channel ORA_DISK_1: starting datafile copy
input datafile file number=00003 name=+DATA/dcpdb/datafile/undotbs1.258.809456141
output file name=+DATA/drpdb/datafile/undotbs1.262.810000907 tag=TAG20130314T135343
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:25
channel ORA_DISK_1: starting datafile copy
input datafile file number=00005 name=+DATA/dcpdb/datafile/undotbs2.267.809456519
output file name=+DATA/drpdb/datafile/undotbs2.263.810000931 tag=TAG20130314T135343
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:07
channel ORA_DISK_1: starting datafile copy
input datafile file number=00004 name=+DATA/dcpdb/datafile/users.259.809456141
output file name=+DATA/drpdb/datafile/users.264.810000939 tag=TAG20130314T135343
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:01
Finished backup at 14-MAR-13

sql statement: alter system archive log current

contents of Memory Script:
{
   switch clone datafile all;
}
executing Memory Script

datafile 1 switched to datafile copy
input datafile copy RECID=1 STAMP=810051347 file name=+DATA/drpdb/datafile/system.261.810000841
datafile 2 switched to datafile copy
input datafile copy RECID=2 STAMP=810051347 file name=+DATA/drpdb/datafile/sysaux.260.810000755
datafile 3 switched to datafile copy
input datafile copy RECID=3 STAMP=810051347 file name=+DATA/drpdb/datafile/undotbs1.262.810000907
datafile 4 switched to datafile copy
input datafile copy RECID=4 STAMP=810051347 file name=+DATA/drpdb/datafile/users.264.810000939
datafile 5 switched to datafile copy
input datafile copy RECID=5 STAMP=810051347 file name=+DATA/drpdb/datafile/undotbs2.263.810000931
datafile 6 switched to datafile copy
input datafile copy RECID=6 STAMP=810051347 file name=+DATA/drpdb/datafile/t24masterdata.258.809999543
datafile 7 switched to datafile copy
input datafile copy RECID=7 STAMP=810051347 file name=+DATA/drpdb/datafile/t24masterindex.259.810000471
Finished Duplicate Db at 14-MAR-13

RMAN>


Login DRPDB1 as oracle:

export ORACLE_BASE=/u02/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1
export GRID_HOME=/u01/app/11.2.0/grid
export ORACLE_SID=drpdb
cd $ORACLE_HOME/bin

./sqlplus " / as sysdba"

SQL*Plus: Release 11.2.0.1.0 Production on Thu Mar 14 14:23:29 2013

Copyright (c) 1982, 2009, Oracle.  All rights reserved.


Connected to:
Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production
With the Partitioning, Automatic Storage Management, OLAP, Data Mining
and Real Application Testing options

SQL> select status from v$instance;

STATUS
------------
MOUNTED

SQL> select flashback_on from v$database;

FLASHBACK_ON
------------------
NO

SQL> alter database flashback on;

Database altered.

SQL> select flashback_on from v$database;

FLASHBACK_ON
------------------
YES

SQL>

SQL> select database_role,PROTECTION_MODE,PROTECTION_LEVEL from v$database;

DATABASE_ROLE    PROTECTION_MODE      PROTECTION_LEVEL
---------------- -------------------- --------------------
PHYSICAL STANDBY MAXIMUM AVAILABILITY MAXIMUM AVAILABILITY

SQL> alter database open read only;

Database altered.

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;

Database altered.

SQL>


SQL> select database_role,PROTECTION_MODE,PROTECTION_LEVEL from v$database;

DATABASE_ROLE    PROTECTION_MODE      PROTECTION_LEVEL
---------------- -------------------- --------------------
PHYSICAL STANDBY MAXIMUM AVAILABILITY MAXIMUM AVAILABILITY

SQL>


How to Open DR in Read only Mode?

SQL> shutdown immediate
ORA-01109: database not open


Database dismounted.
ORACLE instance shut down.
SQL> STARTUP MOUNT;
ORACLE instance started.

Total System Global Area 1.5400E+10 bytes
Fixed Size                  2184872 bytes
Variable Size            7717521752 bytes
Database Buffers         7650410496 bytes
Redo Buffers               29409280 bytes
Database mounted.
SQL> ALTER DATABASE OPEN READ ONLY;

Database altered.

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;

Database altered.

SQL>


##################### Query #########################

set pagesize 999
set linesize 999
col member for a30
col status for a18

select l.thread#,l.group#,l.members, member,l.archived,l.status
  from v$log l, v$logfile lf
  where l.group# = lf.group#;

   THREAD#     GROUP#    MEMBERS MEMBER                         ARC STATUS
---------- ---------- ---------- ------------------------------ --- ------------------
         1          2          2 +DATA/drpdb/onlinelog/group_2. YES CLEARING
                                 267.810000953

         1          2          2 +DATA/drpdb/onlinelog/group_2. YES CLEARING
                                 268.810000955

         1          1          2 +DATA/drpdb/onlinelog/group_1. YES CLEARING_CURRENT
                                 265.810000949

         1          1          2 +DATA/drpdb/onlinelog/group_1. YES CLEARING_CURRENT
                                 266.810000951

         2          4          2 +DATA/drpdb/onlinelog/group_4. YES CLEARING
                                 271.810000961

         2          4          2 +DATA/drpdb/onlinelog/group_4. YES CLEARING
                                 272.810000963

         2          5          2 +DATA/drpdb/onlinelog/group_5. YES CLEARING
                                 273.810000965

         2          5          2 +DATA/drpdb/onlinelog/group_5. YES CLEARING
                                 274.810000967

         1          3          2 +DATA/drpdb/onlinelog/group_3. YES CLEARING
                                 269.810000955

         1          3          2 +DATA/drpdb/onlinelog/group_3. YES CLEARING
                                 270.810000957

         2          6          2 +DATA/drpdb/onlinelog/group_6. YES CLEARING_CURRENT
                                 275.810000967

         2          6          2 +DATA/drpdb/onlinelog/group_6. YES CLEARING_CURRENT
                                 276.810000969


12 rows selected.

SQL> select name from v$controlfile;

NAME
---------------------------------------------------
+DATA/drpdb/controlfile/current.256.809999501
+DATA/drpdb/controlfile/current.257.809999501

SQL> select file_name, tablespace_name from dba_data_files;

FILE_NAME                                           TABLESPACE_NAME
--------------------------------------------------- ------------------------------
+DATA/drpdb/datafile/users.264.810000939            USERS
+DATA/drpdb/datafile/undotbs1.262.810000907         UNDOTBS1
+DATA/drpdb/datafile/sysaux.260.810000755           SYSAUX
+DATA/drpdb/datafile/system.261.810000841           SYSTEM
+DATA/drpdb/datafile/undotbs2.263.810000931         UNDOTBS2
+DATA/drpdb/datafile/t24masterdata.258.809999543    T24MASTERDATA
+DATA/drpdb/datafile/t24masterindex.259.810000471   T24MASTERINDEX

7 rows selected.

SQL>

SQL> select recovery_mode from v$archive_dest_status;

RECOVERY_MODE
-----------------------
IDLE
MANAGED REAL TIME APPLY
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE
IDLE

32 rows selected.

SQL>

SQL> select database_role,PROTECTION_MODE,PROTECTION_LEVEL from v$database;

DATABASE_ROLE    PROTECTION_MODE      PROTECTION_LEVEL
---------------- -------------------- --------------------
PHYSICAL STANDBY MAXIMUM AVAILABILITY MAXIMUM AVAILABILITY




SQL> select database_mode, recovery_mode, protection_mode, synchronization_status, synchronized from v$archive_dest_status;

DATABASE_MODE   RECOVERY_MODE           PROTECTION_MODE      SYNCHRONIZATION_STATUS SYN
--------------- ----------------------- -------------------- ---------------------- ---
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
OPEN_READ-ONLY  MANAGED REAL TIME APPLY MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO
UNKNOWN         IDLE                    MAXIMUM PERFORMANCE  STATUS NOT AVAILABLE   NO

32 rows selected.

SQL>

SQL>


Change the Protection Mode

New Protection Mode: Maximum Availability.

    On Primary:

alter system set LOG_ARCHIVE_DEST_2='SERVICE=drpdb SYNC AFFIRM VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=drpdb' scope=both sid='*';
alter database set standby database to maximize availability;

    On Standby:


alter system set LOG_ARCHIVE_DEST_1='SERVICE=dcpdb SYNC AFFIRM VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=dcpdb' scope=both sid='*';
alter database set standby database to maximize availability;




DR Shutdown & Startup Process:

SQL> shutdown immediate
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> startup mount
ORACLE instance started.

Total System Global Area 1.5400E+10 bytes
Fixed Size                  2184872 bytes
Variable Size            7717521752 bytes
Database Buffers         7650410496 bytes
Redo Buffers               29409280 bytes
Database mounted.
SQL> alter database open read only;

Database altered.

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;

Database altered.

SQL> select database_role,PROTECTION_MODE,PROTECTION_LEVEL from v$database;

DATABASE_ROLE    PROTECTION_MODE      PROTECTION_LEVEL
---------------- -------------------- --------------------
PHYSICAL STANDBY MAXIMUM AVAILABILITY MAXIMUM AVAILABILITY

SQL>

SQL> select status from v$instance;

STATUS
------------------
OPEN

SQL> select open_mode from v$database;

OPEN_MODE
--------------------
READ ONLY WITH APPLY

SQL>

SQL> select open_mode ,protection_mode , database_role from v$database ;


SQL> SELECT THREAD#, SEQUENCE#, APPLIED FROM V$ARCHIVED_LOG order by SEQUENCE# ;



###########################  Test Log Transport ##################


On the primary server, check the latest archived redo log and force a log switch.

On dcpdb1 and dcpdb2:
    ALTER SESSION SET nls_date_format='DD-MON-YYYY HH24:MI:SS';

    SELECT sequence#, first_time, next_time
    FROM   v$archived_log
    ORDER BY sequence#;

    ALTER SYSTEM SWITCH LOGFILE;

Check the new archived redo log has arrived at the standby server and been applied.

drpdb1:
    ALTER SESSION SET nls_date_format='DD-MON-YYYY HH24:MI:SS';

    SELECT sequence#, first_time, next_time, applied
    FROM   v$archived_log
    ORDER BY sequence#;
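
Two further checks that are useful here, both run on the standby (not part of the original notes): gap detection and the highest applied sequence per thread.

    SELECT thread#, low_sequence#, high_sequence#
    FROM   v$archive_gap;

    SELECT thread#, MAX(sequence#) AS last_applied
    FROM   v$archived_log
    WHERE  applied = 'YES'
    GROUP  BY thread#;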

############################ END Test Log Transport ##########

Network Data Pump export & import steps (export/import over a database link):

Prerequisites:

1. The source and destination databases must be connected through the network.
2. Both databases must be in open mode.
3. The source database (db1) TNS details must exist at the destination (db2).

Following are the steps:
1. Create a public database link at db2 connecting to db1 using the system user.

SQL> create public database link db1_link connect to system identified by sys using 'db1';
or
SQL>CREATE public DATABASE LINK db1_link 
CONNECT TO USERNAME IDENTIFIED BY PASSWORD USING
'(DESCRIPTION=(ADDRESS_LIST=(
ADDRESS=(PROTOCOL=TCP)(HOST=xxx.xxx.xxx.xxx)(PORT=xxxx)))
(CONNECT_DATA=(SID=xxxx))
)'
/

Database link created.

Crosscheck the db link is working.

SQL> select * from dual@db1_link;

D
-
X

You may face the following error during impdp if the db link is not public:
ORA-39001: invalid argument value
ORA-39200: Link name "fmstest" is invalid.
ORA-02019: connection description for remote database not found

  
2.   Import using the impdp command at db2 (destination):

[oracle@~]$ export ORACLE_SID=test01
[oracle@~]$ impdp system/password schemas=test_schema1 network_link=db1_link

Import: Release 11.2.0.2.0 - Production on Fri Jun 17 07:00:57 2011

Copyright (c) 1982, 2009, Oracle and/or its affiliates.  All rights reserved.

Connected to: Oracle Database 11g Enterprise Edition Release 11.2.0.2.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
Starting "SYSTEM"."SYS_IMPORT_SCHEMA_01":  system/******** schemas= test_schema1 network_link= db1_link
Estimate in progress using BLOCKS method...
Processing object type SCHEMA_EXPORT/TABLE/TABLE_DATA
Total estimation using BLOCKS method: 4 MB
Processing object type SCHEMA_EXPORT/USER
Processing object type SCHEMA_EXPORT/SYSTEM_GRANT
Processing object type SCHEMA_EXPORT/ROLE_GRANT
--------------------------------------------------------------------------
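
The same network import can also remap the schema or tablespaces on the fly; an illustrative variant of the command above (the target names are hypothetical):

[oracle@~]$ impdp system/password network_link=db1_link schemas=test_schema1 \
            remap_schema=test_schema1:test_schema2 remap_tablespace=users:users2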



The link should be public because users other than SYSTEM are the ones likely to be using it.

Your next task is to create a directory object that Data Pump can write to while it is working (with NETWORK_LINK no dump file is staged, but the log file still needs a valid directory):

CREATE DIRECTORY dmpdir AS '/opt/oracle';

This was suggested on other sites but didn't work for me, as the directory didn't exist:
ORA-39002: invalid operation
ORA-39070: Unable to open the log file.
ORA-29283: invalid file operation
ORA-06512: at "SYS.UTL_FILE", line 475
ORA-29283: invalid file operation

I fixed it by recreating the directory object against a path that actually exists:

DROP DIRECTORY dmpdir;
CREATE DIRECTORY dmpdir AS '/usr/lib/oracle/xe/app/oracle/product/';

Not necessarily the best choice of directory, but one that existed and was not full of other files (I could clean it easily).

Finally, grant read and write access on that directory to the user that will do the copying:
GRANT read, write ON DIRECTORY dmpdir TO BLAH;

Now, at the command line on the machine you are copying to, navigate to ORACLE_HOME/bin and run the following command:

impdp BLAH DIRECTORY=dmpdir NETWORK_LINK=blahsyslink

Enter BLAH's password (for the local system) when prompted.

Sunday, December 22, 2013

START / STOP CLUSTERWARE CRS commands with examples


1. START / STOP CLUSTERWARE CRS:


To start and stop CRS when the machine boots or shuts down, there are rc scripts in place on Unix:

/etc/init.d/init.crs start
/etc/init.d/init.crs stop
/etc/init.d/init.crs enable
/etc/init.d/init.crs disable

You can also, as root, manually start, stop, enable or disable the services with:

# crsctl start crs
# crsctl stop crs
# crsctl enable crs
# crsctl disable crs

On a unix system, you may find the following in the /etc/inittab file.

# cat /etc/inittab | grep crs
h3:35:respawn:/etc/init.d/init.crsd run > /dev/null 2>&1 </dev/null

# cat /etc/inittab | grep evmd
h1:35:respawn:/etc/init.d/init.evmd run > /dev/null 2>&1 </dev/null

# cat /etc/inittab | grep css
h2:35:respawn:/etc/init.d/init.cssd fatal > /dev/null 2>&1 </dev/null

/etc/init.d> ls -al *init*
init.crs
init.crsd
init.cssd
init.evmd

# cat /etc/inittab
..
..
h1:35:respawn:/etc/init.d/init.evmd run > /dev/null 2>&1 </dev/null
h2:35:respawn:/etc/init.d/init.cssd fatal > /dev/null 2>&1 </dev/null
h3:35:respawn:/etc/init.d/init.crsd run > /dev/null 2>&1 </dev/null


===================================
2. STARTING / STOPPING THE CLUSTER:
===================================

-- Stopping the Cluster:

Before you shut down any processes that are monitored by Enterprise Manager Grid Control, set a blackout in
Grid Control for the processes that you intend to shut down. This is necessary so that the availability records
for these processes indicate that the shutdown was planned downtime, rather than an unplanned system outage.
Shut down all Oracle RAC instances on all nodes. To shut down all Oracle RAC instances for a database,
enter the following command, where db_name is the name of the database:

$ ORACLE_HOME/bin/srvctl stop database -d db_name

Shut down all ASM instances on all nodes. To shut down an ASM instance, enter the following command,
where node is the name of the node where the ASM instance is running:

$ ORACLE_HOME/bin/srvctl stop asm -n node

Stop all node applications on all nodes. To stop node applications running on a node, enter the following command,
where node is the name of the node where the applications are running

$ ORACLE_HOME/bin/srvctl stop nodeapps -n node

Log in as the root user, and shut down the Oracle Clusterware or CRS process by entering the following command
on all nodes:

# CRS_HOME/bin/crsctl stop crs                                          # as root


----- Starting the Cluster:

# CRS_HOME/bin/crsctl start crs                                          # as root
$ ORACLE_HOME/bin/srvctl start nodeapps -n node
$ ORACLE_HOME/bin/srvctl start asm -n node
$ ORACLE_HOME/bin/srvctl start database -d db_name    # will start all instances of the Database


================================================
3. CRS_STAT -t command OR SRVCTL STATUS command:
================================================

CRS_STAT example:
-----------------

Viewing the status of instances, database, nodeapps:

For example, to list the status of the apps in the cluster, use crs_stat:

/home/oracle-->$CRS_HOME/bin/crs_stat -t


Name           Type           Target    State     Host
------------------------------------------------------------
ora....SM1.asm application    ONLINE    ONLINE    aix1
ora....x1.lsnr application    ONLINE    ONLINE    aix1
ora....ix1.gsd application    ONLINE    ONLINE    aix1
ora....ix1.ons application    ONLINE    ONLINE    aix1
ora....ix1.vip application    ONLINE    ONLINE    aix1
ora....SM2.asm application    ONLINE    ONLINE    aix2
ora....x2.lsnr application    ONLINE    ONLINE    aix2
ora....ix2.gsd application    ONLINE    ONLINE    aix2
ora....ix2.ons application    ONLINE    ONLINE    aix2
ora....ix2.vip application    ONLINE    ONLINE    aix2
ora....test.db application    ONLINE    ONLINE    aix1
ora....x1.inst application    ONLINE    ONLINE    aix1
ora....x2.inst application    ONLINE    ONLINE    aix2
/home/oracle-->


SRVCTL example:
---------------

$ srvctl status nodeapps -n mynewserver

VIP is running on node: mynewserver
GSD is running on node: mynewserver
Listener is not running on node: mynewserver
ONS daemon is running on node: mynewserver


CRSCTL example:
---------------

Checking crs on the node:

# crsctl check crs

Cluster Synchronization Services appears healthy
Cluster Ready Services appears healthy
Event Manager appears healthy

Checking crs clusterwide:

crsctl check cluster

# crsctl check cluster
node1-pub    ONLINE
node2-pub    ONLINE






===================
4. OTHER EXAMPLES:
===================


Example 1. Bring up the MYSID1 instance of the MYSID database.

$ srvctl start instance -d MYSID -i MYSID1

Example 2. Stop the MYSID database: all its instances and all its services, on all nodes.

$ srvctl stop database -d MYSID

Example 3. Stop the nodeapps on the myserver node. NB: Instances and services also stop.

$ srvctl stop nodeapps -n myserver

Example 4. Add the MYSID3 instance, which runs on the myserver node, to the MYSID
clustered database.

$ srvctl add instance -d MYSID -i MYSID3 -n myserver

Example 5. Add a new node, the mynewserver node, to a cluster.

$ srvctl add nodeapps -n mynewserver -o $ORACLE_HOME -A 10.10.10.100/255.255.255.0/eth1
(The -A flag precedes an address specification.)


srvctl add database -d drpdb -o $ORACLE_HOME -m local -p "+DATA/drpdb/spfiledrpdb.ora" -n drpdb1


srvctl add instance -d rac11g2s -i rac11g2s1 -n rac4b

srvctl add database -d rac11g2s -o $ORACLE_HOME -m local -p "+DATA/rac11g2s/spfilerac11g2s.ora" -n rac11g2 -r physical_standby -s mount

Each instance must be started in read-only mode if all of them are to be used for a read-only application. On rac4b:

SQL>

---------------------------------------------------------------------------------------------------------------

Modification on 19 Dec 2013 Thursday in NBL:

drpdb1:
Add Database to Cluster:

$oracle_home/bin/srvctl add database -d drpdb -o $ORACLE_HOME -m local -p "+DATA/drpdb/spfiledrpdb.ora" -n dcpdb -r physical_standby

$oracle_home/bin/srvctl add instance -d drpdb -i drpdb1 -n drpdb1


Drpdb1:
Modify ASM:

$GRID_HOME/bin/srvctl modify instance -d drpdb -i drpdb1 -s +ASM1

srvctl modify instance -d rac11g2s -i rac11g2s1 -s +ASM1



drpdb:
Start Standby Database drpdb:

$GRID_HOME/bin/srvctl status database -d drpdb

$GRID_HOME/bin/srvctl start database -d drpdb

srvctl config database -d drpdb



drpdb1:
Remove Database:

$ORACLE_HOME/bin/srvctl remove instance -d drpdb -i drpdb1

$ORACLE_HOME/bin/srvctl remove database -d drpdb



drpdb1:
Remove Listener from Cluster:

$GRID_HOME/bin/srvctl remove listener -n drpdb1 -l listenerdb



drpdb1:
Start Standby Database drpdb:

$GRID_HOME/bin/srvctl status database -d drpdb

$GRID_HOME/bin/srvctl start database -d drpdb


drpdb1:
Show Configuration of standby Database drpdb:

$GRID_HOME/bin/srvctl config database -d drpdb


Drpdb1:
Scan Listener Status:
$GRID_HOME/bin/srvctl status scan_listener

dcpdb1:

oracle@dcpdb1 [/home/oracle]$ emctl status dbconsole
Environment variable ORACLE_UNQNAME not defined. Please set ORACLE_UNQNAME to database unique name.
oracle@dcpdb1 [/home/oracle]$ export ORACLE_UNQNAME=dcpdb
oracle@dcpdb1 [/home/oracle]$ emctl status dbconsole
Oracle Enterprise Manager 11g Database Control Release 11.2.0.4.0
Copyright (c) 1996, 2013 Oracle Corporation.  All rights reserved.
https://dcpdb1:1158/em/console/aboutApplication
Oracle Enterprise Manager 11g is not running.
------------------------------------------------------------------
Logs are generated in directory /u02/app/oracle/product/11.2.0.4/dbhome_1/dcpdb1_dcpdb/sysman/log
oracle@dcpdb1 [/home/oracle]$ emctl start dbconsole
Oracle Enterprise Manager 11g Database Control Release 11.2.0.4.0
Copyright (c) 1996, 2013 Oracle Corporation.  All rights reserved.
https://dcpdb1:1158/em/console/aboutApplication
Starting Oracle Enterprise Manager 11g Database Control ........... started.
------------------------------------------------------------------
Logs are generated in directory /u02/app/oracle/product/11.2.0.4/dbhome_1/dcpdb1_dcpdb/sysman/log
oracle@dcpdb1 [/home/oracle]$ emctl status dbconsole
Oracle Enterprise Manager 11g Database Control Release 11.2.0.4.0
Copyright (c) 1996, 2013 Oracle Corporation.  All rights reserved.
https://dcpdb1:1158/em/console/aboutApplication
Oracle Enterprise Manager 11g is running.
------------------------------------------------------------------
Logs are generated in directory /u02/app/oracle/product/11.2.0.4/dbhome_1/dcpdb1_dcpdb/sysman/log
oracle@dcpdb1 [/home/oracle]$




Modification finished on 19 Dec 2013 Thursday in NBL:
-------------------------------------------------------------------

srvctl remove listener -n dcpdb1 -l listenerdb

srvctl remove listener -n dcpdb2 -l listenerdb



-----------------
grid@drpdb1 [/u01/app/11.2.0.4/grid/bin]$ srvctl status database -d drpdb
Instance drpdb1 is not running on node drpdb1
grid@drpdb1 [/u01/app/11.2.0.4/grid/bin]$ srvctl start database -d drpdb
grid@drpdb1 [/u01/app/11.2.0.4/grid/bin]$ crs_stat -t
Name           Type           Target    State     Host
------------------------------------------------------------
ora.DATA.dg    ora....up.type ONLINE    ONLINE    drpdb1
ora.FRA.dg     ora....up.type ONLINE    ONLINE    drpdb1
ora....ER.lsnr ora....er.type ONLINE    ONLINE    drpdb1
ora....BA.lsnr ora....er.type ONLINE    ONLINE    drpdb1
ora....N1.lsnr ora....er.type ONLINE    ONLINE    drpdb1
ora.OCR.dg     ora....up.type ONLINE    ONLINE    drpdb1
ora.VOTING.dg  ora....up.type ONLINE    ONLINE    drpdb1
ora.asm        ora.asm.type   ONLINE    ONLINE    drpdb1
ora.cvu        ora.cvu.type   ONLINE    ONLINE    drpdb1
ora.drpdb.db   ora....se.type ONLINE    ONLINE    drpdb1
ora....SM1.asm application    ONLINE    ONLINE    drpdb1
ora....B1.lsnr application    ONLINE    ONLINE    drpdb1
ora....B1.lsnr application    ONLINE    ONLINE    drpdb1
ora.drpdb1.gsd application    ONLINE    OFFLINE
ora.drpdb1.ons application    ONLINE    ONLINE    drpdb1
ora.drpdb1.vip ora....t1.type ONLINE    ONLINE    drpdb1
ora.gsd        ora.gsd.type   ONLINE    OFFLINE
ora....network ora....rk.type ONLINE    ONLINE    drpdb1
ora.oc4j       ora.oc4j.type  ONLINE    ONLINE    drpdb1
ora.ons        ora.ons.type   ONLINE    ONLINE    drpdb1
ora.scan1.vip  ora....ip.type ONLINE    ONLINE    drpdb1
grid@drpdb1 [/u01/app/11.2.0.4/grid/bin]$


-------------------

Example 6. To change the VIP (virtual IP) on a RAC node, use the command

$ srvctl modify nodeapps -A new_address

Example 7. Disable the ASM instance on myserver for maintenance.

$ srvctl disable asm -n myserver

Some other examples:
--------------------

$ srvctl add instance -d <db_name> -i <inst_name> -n <node_name>
$ srvctl add service -d <db_name> -s <service_name> -r "<preferred_list>" [-a "<available_list>"] [-P <TAF_policy>]
$ srvctl add service -d <db_name> -s <service_name> -u {-r "<new_pref_inst>" | -a "<new_avail_inst>"}
$ srvctl add nodeapps -n <node_name> -o <oracle_home> -A <vip_address>/netmask[/if1[|if2|...]]
$ srvctl add asm -n <node_name> -i <asm_inst_name> -o <oracle_home> [-p <spfile>]
$ srvctl config database
$ srvctl config database -d <db_name> [-a] [-t]
$ srvctl config service -d <db_name> [-s <service_name>] [-a] [-S <level>]
$ srvctl config nodeapps -n <node_name> [-a] [-g] [-o] [-s] [-l]
$ srvctl config asm -n <node_name>
$ srvctl config listener -n drpdb1
$ srvctl disable database -d <db_name>
$ srvctl disable instance -d <db_name> -i "<inst_name_list>"
$ srvctl disable service -d <db_name> -s "<service_name_list>" [-i <inst_name>]
$ srvctl disable asm -n <node_name> [-i <asm_inst_name>]
$ srvctl enable database -d <db_name>
$ srvctl enable instance -d <db_name> -i "<inst_name_list>"
$ srvctl enable service -d <db_name> -s "<service_name_list>" [-i <inst_name>]
$ srvctl enable asm -n <node_name> [-i <asm_inst_name>]
$ srvctl getenv database -d <db_name> [-t "<name_list>"]
$ srvctl getenv instance -d <db_name> -i <inst_name> [-t "<name_list>"]
$ srvctl getenv service -d <db_name> -s <service_name> [-t "<name_list>"]
$ srvctl getenv nodeapps -n <node_name> [-t "<name_list>"]
$ srvctl modify database -d <db_unique_name> [-n <db_name>] [-o <oracle_home>] [-m <domain>] [-p <spfile>] [-r {PRIMARY | PHYSICAL_STANDBY | LOGICAL_STANDBY}] [-s <start_options>] [-y {AUTOMATIC | MANUAL}]
$ srvctl modify instance -d <db_name> -i <inst_name> -n <node_name>
$ srvctl modify instance -d <db_name> -i <inst_name> {-s <asm_inst_name> | -r}
$ srvctl modify service -d <db_name> -s <service_name> -i <old_inst_name> -t <new_inst_name> [-f]
$ srvctl modify service -d <db_name> -s <service_name> -i <avail_inst_name> -r [-f]
$ srvctl modify service -d <db_name> -s <service_name> -n -i <preferred_list> [-a <available_list>] [-f]
$ srvctl modify asm -n <node_name> -i <asm_inst_name> [-o <oracle_home>] [-p <spfile>]
$ srvctl relocate service -d <db_name> -s <service_name> -i <old_inst_name> -t <new_inst_name> [-f]
$ srvctl remove database -d <db_name> [-f]
$ srvctl remove instance -d <db_name> -i <inst_name> [-f]
$ srvctl remove service -d <db_name> -s <service_name> [-i <inst_name>] [-f]
$ srvctl remove nodeapps -n "<node_name_list>" [-f]
$ srvctl remove asm -n <node_name> [-i <asm_inst_name>] [-f]
$ srvctl setenv database -d <db_name> {-t <name>=<value>[,<name>=<value>,...] | -T <name>=<value>}
$ srvctl setenv instance -d <db_name> [-i <inst_name>] {-t "<name>=<value>[,...]" | -T "<name>=<value>"}
$ srvctl setenv service -d <db_name> [-s <service_name>] {-t "<name>=<value>[,...]" | -T "<name>=<value>"}
$ srvctl setenv nodeapps -n <node_name> {-t "<name>=<value>[,...]" | -T "<name>=<value>"}
$ srvctl start database -d <db_name> [-o <start_options>] [-c <connect_string> | -q]
$ srvctl start instance -d <db_name> -i "<inst_name_list>" [-o <start_options>] [-c <connect_string> | -q]
$ srvctl start service -d <db_name> [-s "<service_name_list>" [-i <inst_name>]] [-o <start_options>] [-c <connect_string> | -q]
$ srvctl start nodeapps -n <node_name>
$ srvctl start asm -n <node_name> [-i <asm_inst_name>] [-o <start_options>] [-c <connect_string> | -q]
$ srvctl start listener -n <node_name> [-l <listener_name>]
$ srvctl status database -d <db_name> [-f] [-v] [-S <level>]
$ srvctl status instance -d <db_name> -i "<inst_name_list>" [-f] [-v] [-S <level>]
$ srvctl status service -d <db_name> [-s "<service_name_list>"] [-f] [-v] [-S <level>]
$ srvctl status nodeapps -n <node_name>
$ srvctl status asm -n <node_name>
$ srvctl stop database -d <db_name> [-o <stop_options>] [-c <connect_string> | -q]
$ srvctl stop instance -d <db_name> -i "<inst_name_list>" [-o <stop_options>] [-c <connect_string> | -q]
$ srvctl stop service -d <db_name> [-s "<service_name_list>" [-i <inst_name>]] [-c <connect_string> | -q] [-f]
$ srvctl stop nodeapps -n <node_name>
$ srvctl stop asm -n <node_name> [-i <asm_inst_name>] [-o <stop_options>] [-c <connect_string> | -q]
$ srvctl stop listener -n <node_name> [-l <listener_name>]
$ srvctl unsetenv database -d <db_name> -t "<name_list>"
$ srvctl unsetenv instance -d <db_name> [-i <inst_name>] -t "<name_list>"
$ srvctl unsetenv service -d <db_name> [-s <service_name>] -t "<name_list>"
$ srvctl unsetenv nodeapps -n <node_name> -t "<name_list>"

Tuesday, December 17, 2013

Manual cloning database from one server to another?

First of all, take a full backup of the primary database using RMAN and transfer that backup to the new server, then follow the steps below.

Following are the steps to restore and recover the database on a different server (a minimal RMAN sketch follows the list):
1) Create the init.ora file.
2) startup nomount. 
3) Connect to the RMAN Prompt. 
4) Restore the controlfile. 
5) catalog the backup. 
6) startup mount 
7) Restore database. 
8) Recover database. 
9) alter database open resetlogs. 
10) Select name,open_mode from v$database; 
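
A minimal RMAN sketch of those steps on the new server (file names and paths are illustrative only; note the database is mounted before cataloging, because CATALOG records the backup pieces into the control file):

-- steps 1-2: pfile created by hand, instance started nomount
SQL> startup nomount pfile='/u01/app/oracle/dbs/initclone.ora'

-- steps 3-9: from RMAN
RMAN> connect target /
RMAN> restore controlfile from '/backup/control_bkp.ctl';
RMAN> alter database mount;
RMAN> catalog start with '/backup/';
RMAN> restore database;
RMAN> recover database;
RMAN> alter database open resetlogs;

-- step 10
SQL> select name, open_mode from v$database;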

Do let me know if you need any other information. 

Or you can use the "duplicate" command for cloning the database on a different server.

Sunday, December 15, 2013

Oracle Real Application Clusters (RAC)

Oracle 11gR2 RAC Installation Steps on Linux

Oracle 11g Real Application Cluster Setup Steps

Prepare the cluster nodes for Oracle RAC:
User Accounts:
NOTE: Oracle recommend different users for the installation of the Grid Infrastructure (GI) and the Oracle RDBMS home. The GI will be installed in a separate Oracle base, owned by user 'grid.' After the grid install the GI home will be owned by root, and inaccessible to unauthorized users.

1.  Create OS groups using the command below. Enter these commands as the 'root' user:

#/usr/sbin/groupadd -g 501 oinstall
#/usr/sbin/groupadd -g 502 dba
#/usr/sbin/groupadd -g 503 oper
#/usr/sbin/groupadd -g 504 asmadmin
#/usr/sbin/groupadd -g 506 asmdba
#/usr/sbin/groupadd -g 507 asmoper

2.     Create the users that will own the Oracle software using the commands:

#/usr/sbin/useradd -u 501 -c "Oracle Grid Infrastructure Owner"  -g oinstall -G asmadmin,asmdba,asmoper grid
#/usr/sbin/useradd -u 502 -c "Oracle RDBMS Owner" -g oinstall -G dba,oper,asmdba oracle

3.  Set the passwords for the grid and oracle accounts using the following commands. Replace password with your own password.

[root@krac1 ~]# passwd grid
Changing password for user grid.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
[root@krac1 ~]#passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.

4.  Repeat Step 1 through Step 3 on each node in your cluster.

Networking:

1.    Determine your cluster name. The cluster name should satisfy the following conditions:
  •  The cluster name is globally unique throughout your host domain.
  •  The cluster name is at least 1 character long and less than 15 characters long.
  •  The cluster name must consist of the same character set used for host names: single-byte alphanumeric characters (a to z, A to Z, and 0 to 9) and hyphens (-).
2.  Determine the public host name for each node in the cluster. For the public host name, use the primary host name of each node. In other words, use the name displayed by the hostname command for example:kracnode1.
3.  Determine the public virtual hostname for each node in the cluster. The virtual host name is a public node name that is used to reroute client requests sent to the node if the node is down. Oracle recommends that you provide a name in the format <public hostname>-vip, for example: kracnode1-vip. The virtual hostname must meet the following requirements:
  •  The virtual IP address and the network name must not be currently in use.
  •  The virtual IP address must be on the same subnet as your public IP address. 
  •   The virtual host name for each node should be registered with your DNS.
4.      Determine the private hostname for each node in the cluster. This private hostname does not need to be resolvable through DNS and should be entered in the /etc/hosts file. A common naming convention for the private hostname is <public hostname>-pvt.
  • The private IP should NOT be accessible to servers not participating in the local cluster.
  • The private network should be on standalone dedicated switch(es).  
  • The private network should NOT be part of a larger overall network topology.
  • The private network should be deployed on Gigabit Ethernet or better. 
  •  It is recommended that redundant NICs are configured with the Linux bonding driver. Active/passive is the preferred bonding method due to its simpler configuration.
[root@kracnode2 ~]#  nslookup kracnode1-vip
Server:         192.168.1.100
Address:        192.168.1.100#53

Name:   kracnode1-vip.india.com
Address: 192.168.1.60
[root@kracnode2 ~]#  nslookup kracnode2-vip
Server:         192.168.1.100
Address:        192.168.1.100#53
Name:   kracnode2-vip.india.com
Address: 192.168.1.61

5.   Define a SCAN DNS name for the cluster that resolves to three IP addresses (round-robin). SCAN IPs must NOT be in the /etc/hosts file; the SCAN name must be resolved by DNS. For DNS configuration setup, refer to the blog post Step by Step DNS configuration.

[root@kracnode2 ~]# nslookup kracnode-scan
Server:         192.168.1.100
Address:        192.168.1.100#53

Name:   kracnode-scan.india.com
Address: 192.168.1.72
Name:   kracnode-scan.india.com
Address: 192.168.1.70
Name:   kracnode-scan.india.com
Address: 192.168.1.71

6.  Even if you are using a DNS, Oracle recommends that you add lines to the /etc/hosts file on each node, specifying the public IP, VIP and private addresses. Configure the /etc/hosts file so that it is similar to the following example:

NOTE: The SCAN IPs MUST NOT be in the /etc/hosts file, as that would result in only 1 SCAN IP for the entire cluster. Read more about the Single Client Access Name (SCAN).

################################################################
####### --------------- Local Host ----------------- ###########
################################################################
127.0.0.1         localhost.localdomain         localhost
################################################################
####### ---------------   eth0 - PUBLIC ------------ ###########
################################################################
192.168.1.50      kracnode1.india.com           kracnode1
192.168.1.51      kracnode2.india.com           kracnode2
################################################################
####### ----------------  eth1 - PRIVATE ----------- ###########
################################################################
10.10.10.10       kracnode1-priv.india.com      kracnode1-priv
10.10.10.20       kracnode2-priv.india.com      kracnode2-priv
################################################################
####### --------------------- VIP ------------------ ###########
################################################################
192.168.1.60      kracnode1-vip.india.com       kracnode1-vip
192.168.1.61      kracnode2-vip.india.com       kracnode2-vip

7.      If you configured the IP addresses in a DNS server, then, as the root user, change the hosts search order in /etc/nsswitch.conf on all nodes as shown here:

Old:
hosts: files nis dns
New:
hosts: dns files nis

After modifying the nsswitch.conf file, restart the nscd daemon on each node using the following
command:

# /sbin/service nscd restart

Synchronizing the Time on ALL Nodes:

[root@krac1 ~]# ls -lr /etc/ntp.conf
-rw-r--r-- 1 root root 1833 Dec  9  2009 /etc/ntp.conf
[root@krac1 ~]# service ntpd stop
Shutting down ntpd:                                        [  OK  ]
[root@krac1 ~]# mv /etc/ntp.conf /etc/ntp.conf.bkp
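
With ntp.conf moved aside on every node, the 11gR2 Cluster Time Synchronization Service (ctssd) will run in active mode and keep the node clocks in sync. After the Grid Infrastructure install, this can be confirmed with:

# $GRID_HOME/bin/crsctl check ctss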

Configuring Kernel Parameter:

1.      As the root user, add the following kernel parameter settings to /etc/sysctl.conf. If any of the parameters are already in the /etc/sysctl.conf file, the higher of the two values should be used.

kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 6553600
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576

NOTE: The latest information on kernel parameter settings for Linux can be found in My Oracle Support ExtNote:169706.1.

2.      Run the following as the root user to allow the new kernel parameters to be put in place:

#/sbin/sysctl -p

3.      Repeat steps 1 and 2 on all cluster nodes.

NOTE: OUI checks the current settings for various kernel parameters to ensure they meet the minimum requirements for deploying Oracle RAC.

4.     Set shell limits for the oracle and grid user:

To improve the performance of the software on Linux systems, you must increase the shell limits for the oracle and grid users.

Add the following lines to the /etc/security/limits.conf file:

grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536

5.      Add or edit the following line in the /etc/pam.d/login file, if it does not already exist:

session required pam_limits.so

6.       Make the following changes to the default shell startup file, add the following lines to the /etc/profile file:

if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -p 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
  umask 022
fi

For the C shell (csh or tcsh), add the following lines to the /etc/csh.login file:

if ( $USER = "oracle" || $USER = "grid" ) then
limit maxproc 16384
limit descriptors 65536
endif

7.      Repeat this procedure on all other nodes in the cluster.

Creating the directories.

1.      Create the Oracle Inventory Directory:

To create the Oracle Inventory directory, enter the following commands as the root user:

# mkdir -p /u01/app/oraInventory
# chown -R grid:oinstall /u01/app/oraInventory
# chmod -R 775 /u01/app/oraInventory

2.      Creating the Oracle Grid Infrastructure Home Directory:

# mkdir -p /u01/11.2.0/grid
# chown -R grid:oinstall /u01/11.2.0/grid
# chmod -R 775 /u01/11.2.0/grid

3.      Creating the Oracle Base Directory

To create the Oracle Base directory, enter the following commands as the root user:

# mkdir -p /u01/app/oracle
# mkdir /u01/app/oracle/cfgtoollogs #needed to ensure that dbca is able to run after the rdbms installation.
# chown -R oracle:oinstall /u01/app/oracle
# chmod -R 775 /u01/app/oracle

4.      Creating the Oracle RDBMS Home Directory

To create the Oracle RDBMS Home directory, enter the following commands as the root user:

# mkdir -p /u01/app/oracle/product/11.2.0/db_1
# chown -R oracle:oinstall /u01/app/oracle/product/11.2.0/db_1
# chmod -R 775 /u01/app/oracle/product/11.2.0/db_1
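
Optionally, verify the ownership and permissions of the new directories on every node before launching the installers:

# ls -ld /u01/app/oraInventory /u01/11.2.0/grid /u01/app/oracle /u01/app/oracle/product/11.2.0/db_1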

Check OS Software Requirements

The OUI will check for missing packages during the install and you will have the opportunity to install them at that point during the prechecks. Nevertheless you might want to validate that all required packages have been installed prior to launching the OUI.

NOTE: These requirements are for 64-bit versions of Oracle Enterprise Linux 5 and Red Hat Enterprise Linux 5. Requirements for other supported platforms can be found in My Oracle Support Note 169706.1.

binutils-2.15.92.0.2
compat-libstdc++-33-3.2.3
compat-libstdc++-33-3.2.3 (32 bit)
elfutils-libelf-0.97
elfutils-libelf-devel-0.97
expat-1.95.7
gcc-3.4.6
gcc-c++-3.4.6
glibc-2.3.4-2.41
glibc-2.3.4-2.41 (32 bit)
glibc-common-2.3.4
glibc-devel-2.3.4
glibc-headers-2.3.4
libaio-0.3.105
libaio-0.3.105 (32 bit)
libaio-devel-0.3.105
libaio-devel-0.3.105 (32 bit)
libgcc-3.4.6
libgcc-3.4.6 (32-bit)
libstdc++-3.4.6
libstdc++-3.4.6 (32 bit)
libstdc++-devel 3.4.6
make-3.80
pdksh-5.2.14
sysstat-5.0.5
unixODBC-2.2.11
unixODBC-2.2.11 (32 bit)
unixODBC-devel-2.2.11
unixODBC-devel-2.2.11 (32 bit)

The following command can be run on the system to list the currently installed packages:

rpm -q --qf '%{NAME}-%{VERSION}-%{RELEASE} (%{ARCH})\n' binutils \
compat-libstdc++-33 \
elfutils-libelf \
elfutils-libelf-devel \
gcc \
gcc-c++ \
glibc \
glibc-common \
glibc-devel \
glibc-headers \
ksh \
libaio \
libaio-devel \
libgcc \
libstdc++ \
libstdc++-devel \
make \
sysstat \
unixODBC \
unixODBC-devel
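
Alternatively, a small loop such as the one below (a sketch only, using the same package names queried above) prints just the packages that are missing:

for pkg in binutils compat-libstdc++-33 elfutils-libelf elfutils-libelf-devel \
           gcc gcc-c++ glibc glibc-common glibc-devel glibc-headers ksh \
           libaio libaio-devel libgcc libstdc++ libstdc++-devel make \
           sysstat unixODBC unixODBC-devel
do
  # report only what rpm cannot find
  rpm -q "$pkg" > /dev/null 2>&1 || echo "MISSING: $pkg"
done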

Prepare the shared storage for Oracle RAC
     This section describes how to prepare the shared storage for Oracle RAC. Each node in a cluster requires external shared disks for storing the Oracle Clusterware files (Oracle Cluster Registry and voting disk) and the Oracle Database files. To ensure high availability of Oracle Clusterware files on Oracle ASM:
  • All of the devices in an Automatic Storage Management diskgroup should be the same size and have the same performance characteristics. 
  • A diskgroup should not contain more than one partition on a single physical disk device. Using logical volumes as a device in an Automatic Storage Management diskgroup is not supported with Oracle RAC.
  • The user account with which you perform the installation (typically, 'oracle') must have write permissions to create the files in the path that you specify.
Shared Storage

For this example installation we will be using ASM for Clusterware and Database storage on top of SAN technology. The following Table shows the storage layout for this implementation:
Block Device    ASMlib Name    Size    Comments
/dev/sdb        OCR_VOTE01     1 GB    ASM Diskgroup for OCR and Voting Disks
/dev/sdc        OCR_VOTE02     1 GB    ASM Diskgroup for OCR and Voting Disks
/dev/sdd        OCR_VOTE03     1 GB    ASM Diskgroup for OCR and Voting Disks
/dev/sde        ASM_DATA01     4 GB    ASM Data Diskgroup
/dev/sdf        ASM_DATA02     4 GB    ASM Data Diskgroup
/dev/sdg        ASM_DATA03     4 GB    ASM Flash Recovery Area Diskgroup
/dev/sdh        ASM_DATA04     4 GB    ASM Flash Recovery Area Diskgroup

Partition the Shared Disks:

This section describes how to prepare the shared storage for Oracle RAC.

1.   Once the LUNs have been presented from the SAN to ALL servers in the cluster, partition them from one node only. Run fdisk to create a single whole-disk partition with exactly 1 MB offset on each LUN that will be used as an ASM disk.

[root@kracnode1 ~]# fdisk /dev/sdc
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.

Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)

Command (m for help): u
Changing display/entry units to sectors

Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 1
First sector (63-2097151, default 63):
Using default value 63
Last sector or +size or +sizeM or +sizeK (63-2097151, default 2097151):
Using default value 2097151

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.
[root@kracnode1 ~]#
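
The remaining LUNs are partitioned the same way. If you prefer to script the repetitive answers, a rough sketch like the one below feeds the same defaults to fdisk non-interactively (the device names are only illustrative; double-check every device against your storage table before writing a partition table):

for dev in /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh     # adjust to your LUNs
do
  # n = new partition, p = primary, 1 = partition number,
  # two empty answers accept the default first/last sector, w = write
  echo -e "n\np\n1\n\n\nw" | fdisk $dev
done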

Load the updated block device partition tables by running the following on ALL servers participating in the cluster:

#/sbin/partprobe

Installing and Configuring ASMLib:

ASMLib is highly recommended for systems that will use ASM for shared storage within the cluster, due to the performance and manageability benefits it provides. Perform the following steps to install and configure ASMLib on the cluster nodes:

1.      Download the following packages from the ASMLib OTN page. If you are an Enterprise Linux customer, you can obtain the software through the Unbreakable Linux Network.

NOTE: The ASMLib kernel driver MUST match the kernel revision number, the kernel revision number of your system can be identified by running the "uname -r" command. Also, be sure to download the set of RPMs which pertain to your platform architecture, in our case this is i686.

oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm
oracleasmlib-2.0.4-1.el5.i386.rpm
oracleasm-support-2.1.7-1.el5.i386.rpm

2.      Install the RPMs by running the following as the root user.

[root@kracnode1]# rpm -Uvh oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm \
> oracleasm-support-2.1.7-1.el5.i386.rpm \
> oracleasmlib-2.0.4-1.el5.i386.rpm
warning: oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm: Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing...                ########################################### [100%]
   1:oracleasm-support      ########################################### [ 33%]
   2:oracleasm-2.6.18-194.el########################################### [ 67%]
   3:oracleasmlib           ########################################### [100%]
[root@kracnode1]#

3.      Configure ASMLib by running the following as the root user:

[root@kracnode1 ~]# /etc/init.d/oracleasm configure
Configuring the Oracle ASM library driver.

This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have.  The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.

Default user to own the driver interface []: grid
Default group to own the driver interface []: asmdba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver:                     [  OK  ]
Scanning the system for Oracle ASMLib disks:               [  OK  ]
[root@kracnode1 ~]#
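
Before repeating this on the other nodes, you can optionally confirm that the driver is loaded and the oracleasm filesystem is mounted; the output should look similar to the following:

[root@kracnode1 ~]# /etc/init.d/oracleasm status
Checking if ASM is loaded: yes
Checking if /dev/oracleasm is mounted: yes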

4.      Repeat steps 2 and 3 on ALL cluster nodes.

Using ASMLib to Mark the Shared Disks as Candidate Disks:

To create ASM disks using ASMLib:

1.      As the root user, use oracleasm to create ASM disks using the following syntax:

# /usr/sbin/oracleasm createdisk disk_name device_partition_name

Example:

[root@kracnode1 ~]# /usr/sbin/oracleasm createdisk OCR_VOTE01 /dev/sdc1
Writing disk header: done
Instantiating disk: done
[root@kracnode1 ~]# /usr/sbin/oracleasm createdisk OCR_VOTE02 /dev/sdd1
Writing disk header: done
Instantiating disk: done
[root@kracnode1 ~]# /usr/sbin/oracleasm createdisk OCR_VOTE03 /dev/sde1
Writing disk header: done
Instantiating disk: done
[root@kracnode1 ~]# /usr/sbin/oracleasm createdisk ASM_DISK01 /dev/sdf1
Writing disk header: done
Instantiating disk: done
[root@kracnode1 ~]# /usr/sbin/oracleasm createdisk ASM_DISK02 /dev/sdg1
Writing disk header: done
Instantiating disk: done
[root@kracnode1 ~]# /usr/sbin/oracleasm createdisk ASM_DISK03 /dev/sdh1
Writing disk header: done
Instantiating disk: done
[root@kracnode1 ~]# /usr/sbin/oracleasm createdisk ASM_DISK04 /dev/sdi1
Writing disk header: done
Instantiating disk: done
[root@kracnode1 ~]#

2.      Repeat step 1 for each disk that will be used by Oracle ASM.

After you have created all the ASM disks for your cluster, use the listdisks command to verify their availability:

[root@kracnode1 ~]# /usr/sbin/oracleasm listdisks
ASM_DISK01
ASM_DISK02
ASM_DISK03
ASM_DISK04
OCR_VOTE01
OCR_VOTE02
OCR_VOTE03

3.   On all the other nodes in the cluster, use the scandisks command as the root user to pickup the newly created ASM disks. You do not need to create the ASM disks on each node, only on one node in the cluster.

[root@kracnode2 ]# /usr/sbin/oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
[root@kracnode2 ]# /usr/sbin/oracleasm listdisks
ASM_DISK01
ASM_DISK02
ASM_DISK03
ASM_DISK04
OCR_VOTE01
OCR_VOTE02
OCR_VOTE03
[root@kracnode2 ]#
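
If you ever need to check an individual ASMLib disk label, oracleasm querydisk should confirm that the disk is valid (shown here for one of the OCR disks):

[root@kracnode2 ]# /usr/sbin/oracleasm querydisk OCR_VOTE01
Disk "OCR_VOTE01" is a valid ASM disk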
  
Oracle Grid Infrastructure Install (Basic Grid Infrastructure Install without GNS and IPMI):
./runInstaller

 Action: Select radio button 'Install and Configure Grid Infrastructure for a Cluster' and click ' Next> '
Action: Select radio button 'Advanced Installation' and click ' Next> '

Action: Accept 'English' as language' and click ' Next> '

Action: Specify your cluster name and the SCAN name you want to use and click ' Next> '  
Note: Make sure 'Configure GNS' is NOT selected.

Action:Use the Edit and Add buttons to specify the node names and virtual IP addresses you configured previously in your /etc/hosts file. Use the 'SSH Connectivity' button to configure/test the passwordless SSH connectivity between your nodes.
ACTION: Type in the OS password for the user 'grid' and press 'Setup'
Action:Click on 'Interface Type' next to the Interfaces you want to use for your cluster and select the correct values for 'Public', 'Private' and 'Do Not Use' . When finished click ' Next> '
 
Action: Select radio button 'Automatic Storage Management (ASM)' and click ' Next> '

Action: Select the 'DiskGroup Name', specify the 'Redundancy' and tick the disks you want to use; when done click ' Next> '
NOTE: The number of voting disks that will be created depend on the redundancy level you specify: EXTERNAL will create 1 voting disk, NORMAL will create 3 voting disks, HIGH will create 5 voting disks.

Action: Specify and confirm the password you want to use and click ' Next> '

Action:Select NOT to use IPMI and click ' Next> '
Action: Assign the correct OS groups for OS authentication and click ' Next> '
Action:Specify the locations for your ORACLE_BASE and for the Software location and click ' Next> ' 
Action: Specify the locations for your Inventory directory and click ' Next> '

Note: OUI performs certain checks and comes back with the screen below
Action:Check that status of all checks is Succeeded and click ' Next> '
Action:Click ' Finish' 

Action: Follow the instructions on the screen, running the orainstRoot.sh and root.sh scripts as root on all nodes, before you click 'OK'
NOTE: The required root scripts MUST BE RUN ON ONE NODE AT A TIME!

Sample root.sh output node 1: 

#/u01/app/11.2.0/grid/root.sh
Running Oracle 11g root.sh script...
The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /u01/app/11.2.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
2009-09-25 20:58:00: Parsing the host name
2009-09-25 20:58:00: Checking for super user privileges
2009-09-25 20:58:00: User has super user privileges
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Creating trace directory
LOCAL ADD MODE
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
  root wallet
  root wallet cert
  root cert export
  peer wallet
  profile reader wallet
  pa wallet
  peer wallet keys
  pa wallet keys
  peer cert request
  pa cert request
  peer cert
  pa cert
  peer root cert TP
  profile reader root cert TP
  pa root cert TP
  peer pa cert TP
  pa peer cert TP
  profile reader pa cert TP
  profile reader peer cert TP
  peer user cert
  pa user cert
Adding daemon to inittab
CRS-4123: Oracle High Availability Services has been started.
ohasd is starting
CRS-2672: Attempting to start 'ora.gipcd' on 'kracnode1'
CRS-2672: Attempting to start 'ora.mdnsd' on 'kracnode1'
CRS-2676: Start of 'ora.gipcd' on 'kracnode1' succeeded
CRS-2676: Start of 'ora.mdnsd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'kracnode1'
CRS-2676: Start of 'ora.gpnpd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'kracnode1'
CRS-2676: Start of 'ora.cssdmonitor' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'kracnode1'
CRS-2672: Attempting to start 'ora.diskmon' on 'kracnode1'
CRS-2676: Start of 'ora.diskmon' on 'kracnode1' succeeded
CRS-2676: Start of 'ora.cssd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.ctssd' on 'kracnode1'
CRS-2676: Start of 'ora.ctssd' on 'kracnode1' succeeded
ASM created and started successfully.
DiskGroup OCR_VOTE created successfully.
clscfg: -install mode specified
Successfully accumulated necessary OCR keys.
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
CRS-2672: Attempting to start 'ora.crsd' on 'kracnode1'
CRS-2676: Start of 'ora.crsd' on 'kracnode1' succeeded
CRS-4256: Updating the profile
Successful addition of voting disk 24ce55bef20d4f4ebf16d733a91bc9d3.
Successfully replaced voting disk group with +DATA.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
##  STATE    File Universal Id                File Name Disk group
--  -----    -----------------                --------- ---------
 1. ONLINE   24ce55bef20d4f4ebf16d733a91bc9d3 (ORCL:OCR_VOTE01) [OCR_VOTE]
 2. ONLINE   24ce55bef20d4f4ebf16d733a91bc9d3 (ORCL:OCR_VOTE02) [OCR_VOTE]
 3. ONLINE   24ce55bef20d4f4ebf16d733a91bc9d3 (ORCL:OCR_VOTE03) [OCR_VOTE]
Located 1 voting disk(s).
CRS-2673: Attempting to stop 'ora.crsd' on 'kracnode1'
CRS-2677: Stop of 'ora.crsd' on 'kracnode1' succeeded
CRS-2673: Attempting to stop 'ora.asm' on 'kracnode1'
CRS-2677: Stop of 'ora.asm' on 'kracnode1' succeeded
CRS-2673: Attempting to stop 'ora.ctssd' on 'kracnode1'
CRS-2677: Stop of 'ora.ctssd' on 'kracnode1' succeeded
CRS-2673: Attempting to stop 'ora.cssdmonitor' on 'kracnode1'
CRS-2677: Stop of 'ora.cssdmonitor' on 'kracnode1' succeeded
CRS-2673: Attempting to stop 'ora.cssd' on 'kracnode1'
CRS-2677: Stop of 'ora.cssd' on 'kracnode1' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'kracnode1'
CRS-2677: Stop of 'ora.gpnpd' on 'kracnode1' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'kracnode1'
CRS-2677: Stop of 'ora.gipcd' on 'kracnode1' succeeded
CRS-2673: Attempting to stop 'ora.mdnsd' on 'kracnode1'
CRS-2677: Stop of 'ora.mdnsd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.mdnsd' on 'kracnode1'
CRS-2676: Start of 'ora.mdnsd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.gipcd' on 'kracnode1'
CRS-2676: Start of 'ora.gipcd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'kracnode1'
CRS-2676: Start of 'ora.gpnpd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'kracnode1'
CRS-2676: Start of 'ora.cssdmonitor' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'kracnode1'
CRS-2672: Attempting to start 'ora.diskmon' on 'kracnode1'
CRS-2676: Start of 'ora.diskmon' on 'kracnode1' succeeded
CRS-2676: Start of 'ora.cssd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.ctssd' on 'kracnode1'
CRS-2676: Start of 'ora.ctssd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'kracnode1'
CRS-2676: Start of 'ora.asm' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'kracnode1'
CRS-2676: Start of 'ora.crsd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.evmd' on 'kracnode1'
CRS-2676: Start of 'ora.evmd' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'kracnode1'
CRS-2676: Start of 'ora.asm' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.DATA.dg' on 'kracnode1'
CRS-2676: Start of 'ora.OCR_VOTE.dg' on 'kracnode1' succeeded
CRS-2672: Attempting to start 'ora.registry.acfs' on 'kracnode1'
CRS-2676: Start of 'ora.registry.acfs' on 'kracnode1' succeeded
kracnode1     2010/09/25 21:07:03    /u01/app/11.2.0/grid/cdata/kracnode1/backup_20090925_210703.olr
Preparing packages for installation...
cvuqdisk-1.0.7-1
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
Updating inventory properties for clusterware
Starting Oracle Universal Installer...
Checking swap space: must be greater than 500 MB.   Actual 755 MB   Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /u01/app/oraInventory
'UpdateNodeList' was successful.
#
Sample root.sh output node 2: 
  
#/u01/app/11.2.0/grid/root.sh
Running Oracle 11g root.sh script...
The following environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=  /u01/app/11.2.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]:
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root.sh script.
Now product-specific root actions will be performed.
2009-09-25 21:08:32: Parsing the host name
2009-09-25 21:08:32: Checking for super user privileges
2009-09-25 21:08:32: User has super user privileges
Using configuration parameter file: /u01/app/11.2.0/grid/crs/install/crsconfig_params
Creating trace directory
LOCAL ADD MODE
Creating OCR keys for user 'root', privgrp 'root'..
Operation successful.
Adding daemon to inittab
CRS-4123: Oracle High Availability Services has been started.
ohasd is starting
CRS-4402: The CSS daemon was started in exclusive mode but found an active CSS daemon on node rac1, number 1, and is terminating
CRS-2673: Attempting to stop 'ora.cssdmonitor' on 'kracnode2'
CRS-2677: Stop of 'ora.cssdmonitor' on 'kracnode2' succeeded
An active cluster was found during exclusive startup, restarting to join the cluster
CRS-2672: Attempting to start 'ora.mdnsd' on 'kracnode2'
CRS-2676: Start of 'ora.mdnsd' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.gipcd' on 'kracnode2'
CRS-2676: Start of 'ora.gipcd' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'kracnode2'
CRS-2676: Start of 'ora.gpnpd' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'kracnode2'
CRS-2676: Start of 'ora.cssdmonitor' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'kracnode2'
CRS-2672: Attempting to start 'ora.diskmon' on 'kracnode2'
CRS-2676: Start of 'ora.diskmon' on 'kracnode2' succeeded
CRS-2676: Start of 'ora.cssd' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.ctssd' on 'kracnode2'
CRS-2676: Start of 'ora.ctssd' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.drivers.acfs' on 'kracnode2'
CRS-2676: Start of 'ora.drivers.acfs' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.asm' on 'kracnode2'
CRS-2676: Start of 'ora.asm' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.crsd' on 'kracnode2'
CRS-2676: Start of 'ora.crsd' on 'kracnode2' succeeded
CRS-2672: Attempting to start 'ora.evmd' on 'kracnode2'
CRS-2676: Start of 'ora.evmd' on 'kracnode2' succeeded
kracnode2     2010/09/25 21:13:55    /u01/app/11.2.0/grid/cdata/kracnode2/backup_20090925_211355.olr
Preparing packages for installation...
cvuqdisk-1.0.7-1
Configure Oracle Grid Infrastructure for a Cluster ... succeeded
Updating inventory properties for clusterware
Starting Oracle Universal Installer...
Checking swap space: must be greater than 500 MB.   Actual 1036 MB   Passed
The inventory pointer is located at /etc/oraInst.loc
The inventory is located at /u01/app/oraInventory
'UpdateNodeList' was successful.
 Action: Wait for the OUI to finish the cluster configuration. 
Action: You should see the confirmation that installation of the Grid Infrastructure was successful. Click 'Close' to finish the install.
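
Once the installer closes, it is worth checking the cluster health from any node; for example, as root or as the grid user (adjust the path if your grid home differs from the one created earlier):

# /u01/11.2.0/grid/bin/crsctl check cluster -all
# /u01/11.2.0/grid/bin/crsctl stat res -t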

RDBMS Software Install:
Change into the directory where you staged the RDBMS software:
./runInstaller

Action: Provide your e-mail address, tick the check box and provide your Oracle Support Password if you want to receive Security Updates from Oracle Support and click ' Next> '

Action: Select the option 'Install Database software only' and click ' Next> '

Action: Select 'Real Application Clusters database installation' and select all nodes. Use the 'SSH Connectivity' button to configure/test the passwordless SSH connectivity between your nodes.

 Action: Type the Oracle user password and click "Setup"

Action: To confirm English as selected language click ' Next> '

Action: Make sure radio button 'Enterprise Edition' is ticked, click ' Next> '

Action: Specify path to your Oracle Base and below to the location where you want to store the software (Oracle home). Click ' Next> '
Action: Use the drop down menu to select the names of the Database Administrators and Database Operators groups and click ' Next> '

Action: Check that the status of all checks is 'Succeeded' and click ' Next> ' 
Note: If you are sure the unsuccessful checks can be ignored tick the box 'Ignore All' before you click ' Next> '

Action: Perform a last check that the information on the screen is correct before you click ' Finish '

Action: Log in to a terminal window as root user and run the root.sh script on the first node. When finished do the same for all other nodes in your cluster as well. When finished click 'OK'
NOTE: root.sh should be run on one node at a time.
Action: Click ' Close ' to finish the installation of the RDBMS Software.

Run ASMCA to create diskgroups
As the grid user start the ASM Configuration Assistant (ASMCA)

#su - grid
cd /u01/11.2.0/grid/bin
./asmca

Action: Click 'Create' to create a new diskgroup

Action: Type in a name for the diskgroup, select the redundancy you want to provide and mark the tick box for the disks you want to assign to the new diskgroup. Click 'OK'

Repeat the same steps to create the diskgroup for the flash recovery area.
Action: Click 'Exit'
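
Before moving on to DBCA, the new diskgroups can be checked from the ASM instance; for example, as the grid user with the ASM environment set (ORACLE_HOME pointing to the grid home and ORACLE_SID=+ASM1 on node 1):

$ sqlplus / as sysasm

SQL> select name, state, type, total_mb, free_mb from v$asm_diskgroup;
SQL> exit
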
Run DBCA to create the database

As the oracle user start the Database Configuration Assistant (DBCA)

#su - oracle
cd /u01/app/oracle/product/11.2.0/db_1/bin
./dbca


Action: choose option 'Create a Database' and click 'Next'

Action: Select the database template that you want to use for your database and click 'Next'
Action: Type in the name you want to use for your database and select all nodes before you click 'Next'

Action: select the options you want to use to manage your database and click 'Next'

Action: Type in the passwords you want to use and click 'Next'

Action: Select the diskgroup you created for the database files and click 'Multiplex Redo Logs and Control Files'. In the popup window define the diskgroup that should contain controlfiles and redo logfiles and the diskgroup that should contain the mirrored files.
Action : Enter the ASMSNMP Password and click "OK"
Action: Specify the diskgroup that was created for the flash recovery area and define the size. If the size is smaller than recommended a warning will popup.
Action: Select if you want to have sample schemas created in your database and click 'Next'
Action: Review and change the settings for memory allocation, characterset etc. according to your needs and click 'Next'
Action: Review the database storage settings and click 'Next'
Action: The database is now created, you can either change or unlock your passwords or just click Exit to finish the database creation.
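
Once DBCA completes, the cluster database registration can be verified with srvctl from the oracle user on either node ('kracdb' below is only a placeholder; use the database name you entered in DBCA):

$ srvctl config database -d kracdb     # 'kracdb' is a placeholder database name
$ srvctl status database -d kracdb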

