Oracle DBA Queries

 Useful Queries for Checking Concurrent Requests and Programs 

Hi DBAs,

As an Apps DBA, we have to monitor the status and details of concurrent jobs frequently. This can be done with the help of the queries below.


-- Concurrent Program Queries for Apps DBA:


-- Query 1: For checking locks held and waited for by concurrent jobs


SELECT DECODE(request,0,'Holder: ','Waiter: ')||sid sess,inst_id,id1, id2, lmode, request, type FROM gV$LOCK 

WHERE (id1, id2, type) IN (SELECT id1, id2, type FROM gV$LOCK WHERE request>0) ORDER BY id1,request;


-- Query 2: For checking the concurrent programs currently running, with details of processing time and start date


 SELECT DISTINCT c.USER_CONCURRENT_PROGRAM_NAME,round(((sysdate-a.actual_start_date)*24*60*60/60),2) AS Process_time,

 a.request_id,a.parent_request_id,a.request_date,a.actual_start_date,a.actual_completion_date,(a.actual_completion_date-a.request_date)*24*60*60 AS end_to_end,

 (a.actual_start_date-a.request_date)*24*60*60 AS lag_time,d.user_name, a.phase_code,a.status_code,a.argument_text,a.priority

FROM   apps.fnd_concurrent_requests a,apps.fnd_concurrent_programs b,apps.FND_CONCURRENT_PROGRAMS_TL c,apps.fnd_user d

WHERE  a.concurrent_program_id=b.concurrent_program_id AND b.concurrent_program_id=c.concurrent_program_id AND

a.requested_by=d.user_id AND status_code='R' order by Process_time desc;

    

-- Query 3: For checking the last run of a concurrent program along with its processing time

-- Useful for finding the details of concurrent programs that run daily, and for comparison purposes


SELECT DISTINCT c.USER_CONCURRENT_PROGRAM_NAME,

            round(((a.actual_completion_date-a.actual_start_date)*24*60*60/60),2) AS Process_time,

            a.request_id,a.parent_request_id,To_Char(a.request_date,'DD-MON-YY HH24:MI:SS'),To_Char(a.actual_start_date,'DD-MON-YY HH24:MI:SS'),

  To_Char(a.actual_completion_date,'DD-MON-YY HH24:MI:SS'), (a.actual_completion_date-a.request_date)*24*60*60 AS end_to_end,

            (a.actual_start_date-a.request_date)*24*60*60 AS lag_time,

            d.user_name, a.phase_code,a.status_code,a.argument_text,a.priority

FROM   apps.fnd_concurrent_requests a,

            apps.fnd_concurrent_programs b ,

            apps.FND_CONCURRENT_PROGRAMS_TL c,

            apps.fnd_user d

WHERE       a.concurrent_program_id= b.concurrent_program_id AND

            b.concurrent_program_id=c.concurrent_program_id AND

            a.requested_by =d.user_id AND

--          trunc(a.actual_completion_date) = '24-AUG-2005'

c.USER_CONCURRENT_PROGRAM_NAME='Incentive Compensation Analytics - ODI' --  and argument_text like  '%, , , , ,%';

--          and status_code!='C'


-- Query 4: To check all running concurrent requests that may reference a given package

-- This is a very useful check before compiling any package on a given instance.

-- The query can be modified as per requirement.

-- Remove the FND_CONCURRENT_REQUESTS table and its joins to list all programs dependent on the given package
-- (a sketch of that variant is shown after the query below).


SELECT

 FCR.REQUEST_ID

,FCPV.USER_CONCURRENT_PROGRAM_NAME

,FCPV.CONCURRENT_PROGRAM_NAME

,FCPV.CONCURRENT_PROGRAM_ID

,FCR.STATUS_CODE

,FCR.PHASE_CODE

FROM FND_CONCURRENT_PROGRAMS_VL FCPV

,FND_EXECUTABLES FE

,SYS.DBA_DEPENDENCIES DD

,FND_CONCURRENT_REQUESTS FCR

WHERE FCPV.EXECUTABLE_ID = FE.EXECUTABLE_ID

AND FE.EXECUTION_METHOD_CODE = 'I'

AND SUBSTR(FE.EXECUTION_FILE_NAME,1,INSTR(FE.EXECUTION_FILE_NAME, '.', 1, 1) - 1) = UPPER(DD.NAME)

AND DD.REFERENCED_TYPE IN ('VIEW', 'TABLE', 'TRIGGER', 'PACKAGE') -- add as required

--AND referenced_owner = 'XXCUS'

AND DD.REFERENCED_NAME = UPPER('&Package_name')

AND FCR.CONCURRENT_PROGRAM_ID = FCPV.CONCURRENT_PROGRAM_ID

AND fcr.phase_code NOT IN ( 'C','P');
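As noted above, a minimal sketch of the variant with FND_CONCURRENT_REQUESTS and its joins removed, listing every concurrent program whose PL/SQL executable depends on the given package, whether or not it is currently running:

SELECT
 FCPV.USER_CONCURRENT_PROGRAM_NAME
,FCPV.CONCURRENT_PROGRAM_NAME
,FCPV.CONCURRENT_PROGRAM_ID
FROM FND_CONCURRENT_PROGRAMS_VL FCPV
,FND_EXECUTABLES FE
,SYS.DBA_DEPENDENCIES DD
WHERE FCPV.EXECUTABLE_ID = FE.EXECUTABLE_ID
AND FE.EXECUTION_METHOD_CODE = 'I'
AND SUBSTR(FE.EXECUTION_FILE_NAME,1,INSTR(FE.EXECUTION_FILE_NAME, '.', 1, 1) - 1) = UPPER(DD.NAME)
AND DD.REFERENCED_TYPE IN ('VIEW', 'TABLE', 'TRIGGER', 'PACKAGE') -- add as required
AND DD.REFERENCED_NAME = UPPER('&Package_name');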


-- Query 5: The following query displays the time taken to execute concurrent programs,

-- with the most recently started requests listed first.


 SELECT

      f.request_id ,

      pt.user_concurrent_program_name user_conc_program_name,

      f.actual_start_date start_on,

      f.actual_completion_date end_on,

      floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)

        || ' HOURS ' ||

        floor((((f.actual_completion_date-f.actual_start_date)

        *24*60*60) -

        floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)*3600)/60)

        || ' MINUTES ' ||

        round((((f.actual_completion_date-f.actual_start_date)

        *24*60*60) -

        floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)*3600 -

        (floor((((f.actual_completion_date-f.actual_start_date)

        *24*60*60) -

        floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)*3600)/60)*60) ))

        || ' SECS ' time_difference,

      p.concurrent_program_name concurrent_program_name,

      decode(f.phase_code,'R','Running','C','Complete',f.phase_code) Phase,

      f.status_code

from  apps.fnd_concurrent_programs p,

      apps.fnd_concurrent_programs_tl pt,

      apps.fnd_concurrent_requests f

where f.concurrent_program_id = p.concurrent_program_id

      and f.program_application_id = p.application_id

      and f.concurrent_program_id = pt.concurrent_program_id

      and f.program_application_id = pt.application_id

      AND pt.language = USERENV('Lang')

      and f.actual_start_date is not null

order by

      f.actual_start_date desc;


-- Query 6: By using the below query we can get the SID, SERIAL# and SPID of a concurrent request.

    

    SELECT a.request_id, d.sid, d.serial# , c.SPID

    FROM apps.fnd_concurrent_requests a,

    apps.fnd_concurrent_processes b,

    v$process c,

    v$session d

    WHERE a.controlling_manager = b.concurrent_process_id

    AND c.pid = b.oracle_process_id

    AND b.session_id=d.audsid

    AND a.request_id = &Request_ID

    AND a.phase_code = 'R';


-- Query 7: Concurrent manager and program specialization rules:

-- gives details of the concurrent_queue_name and user_concurrent_program_name


SELECT b.concurrent_queue_name, c.user_concurrent_program_name 

FROM FND_CONCURRENT_QUEUE_CONTENT a, fnd_concurrent_queues b, fnd_concurrent_programs_vl c

WHERE a.queue_application_id = 283 

and a.concurrent_queue_id = b.concurrent_queue_id

and a.type_id = c.concurrent_program_id

order by decode(INCLUDE_FLAG, 'I', 1, 2), type_code;

    

--Query 8: Gives Details of Running Concurrent jobs


SELECT DISTINCT c.USER_CONCURRENT_PROGRAM_NAME,

      round(((sysdate-a.actual_start_date)*24*60*60/60),2) AS Process_time,

    a.request_id,a.parent_request_id,a.request_date,a.actual_start_date,a.actual_completion_date,

      (a.actual_completion_date-a.request_date)*24*60*60 AS end_to_end,

      (a.actual_start_date-a.request_date)*24*60*60 AS lag_time,

      d.user_name, a.phase_code,a.status_code,a.argument_text,a.priority

FROM     apps.fnd_concurrent_requests a,

    apps.fnd_concurrent_programs b , 

    apps.FND_CONCURRENT_PROGRAMS_TL c,

    apps.fnd_user d

WHERE   a.concurrent_program_id=b.concurrent_program_id AND

    b.concurrent_program_id=c.concurrent_program_id AND

    a.requested_by=d.user_id AND

    status_code='R' order by Process_time desc;

    

-- Query 9: Gives details of concurrent requests referencing a given package that are not yet completed or pending (same query as Query 4)


SELECT

 FCR.REQUEST_ID

,FCPV.USER_CONCURRENT_PROGRAM_NAME

,FCPV.CONCURRENT_PROGRAM_NAME

,FCPV.CONCURRENT_PROGRAM_ID

,FCR.STATUS_CODE

,FCR.PHASE_CODE

FROM FND_CONCURRENT_PROGRAMS_VL FCPV

,FND_EXECUTABLES FE

,SYS.DBA_DEPENDENCIES DD

,FND_CONCURRENT_REQUESTS FCR

WHERE FCPV.EXECUTABLE_ID = FE.EXECUTABLE_ID

AND FE.EXECUTION_METHOD_CODE = 'I'

AND SUBSTR(FE.EXECUTION_FILE_NAME,1,INSTR(FE.EXECUTION_FILE_NAME, '.', 1, 1) - 1) = UPPER(DD.NAME)

AND DD.REFERENCED_TYPE IN ('VIEW', 'TABLE', 'TRIGGER', 'PACKAGE') -- add as required

--AND referenced_owner = 'XXCUS'

AND DD.REFERENCED_NAME = UPPER('&Package_name')

AND FCR.CONCURRENT_PROGRAM_ID = FCPV.CONCURRENT_PROGRAM_ID

AND fcr.phase_code NOT IN ( 'C','P');


-- Query 10: Gives details of running and completed concurrent jobs with start date and end date

-- Latest one at the top


select

      f.request_id ,

      pt.user_concurrent_program_name user_conc_program_name,

      f.actual_start_date start_on,

      f.actual_completion_date end_on,

      floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)

        || ' HOURS ' ||

        floor((((f.actual_completion_date-f.actual_start_date)

        *24*60*60) -

        floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)*3600)/60)

        || ' MINUTES ' ||

        round((((f.actual_completion_date-f.actual_start_date)

        *24*60*60) -

        floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)*3600 -

        (floor((((f.actual_completion_date-f.actual_start_date)

        *24*60*60) -

        floor(((f.actual_completion_date-f.actual_start_date)

        *24*60*60)/3600)*3600)/60)*60) ))

        || ' SECS ' time_difference,

      p.concurrent_program_name concurrent_program_name,

      decode(f.phase_code,'R','Running','C','Complete',f.phase_code) Phase,

      f.status_code

from  apps.fnd_concurrent_programs p,

      apps.fnd_concurrent_programs_tl pt,

      apps.fnd_concurrent_requests f

where f.concurrent_program_id = p.concurrent_program_id

      and f.program_application_id = p.application_id

      and f.concurrent_program_id = pt.concurrent_program_id

      and f.program_application_id = pt.application_id

      AND pt.language = USERENV('Lang')

      and f.actual_start_date is not null

order by

      f.actual_start_date desc;


-- Query 11: Wait event details for active sessions (related to concurrent programs)


SELECT s.saddr, s.SID, s.serial#, s.audsid, s.paddr, s.user#, s.username,

s.command, s.ownerid, s.taddr, s.lockwait, s.status, s.server,

s.schema#, s.schemaname, s.osuser, s.process, s.machine, s.terminal,

UPPER (s.program) program, s.TYPE, s.sql_address, s.sql_hash_value,

s.sql_id, s.sql_child_number, s.sql_exec_start, s.sql_exec_id,

s.prev_sql_addr, s.prev_hash_value, s.prev_sql_id,

s.prev_child_number, s.prev_exec_start, s.prev_exec_id,

s.plsql_entry_object_id, s.plsql_entry_subprogram_id,

s.plsql_object_id, s.plsql_subprogram_id, s.module, s.module_hash,

s.action, s.action_hash, s.client_info, s.fixed_table_sequence,

s.row_wait_obj#, s.row_wait_file#, s.row_wait_block#,

s.row_wait_row#, s.logon_time, s.last_call_et, s.pdml_enabled,

s.failover_type, s.failover_method, s.failed_over,

s.resource_consumer_group, s.pdml_status, s.pddl_status, s.pq_status,

s.current_queue_duration, s.client_identifier,

s.blocking_session_status, s.blocking_instance, s.blocking_session,

s.seq#, s.event#, s.event, s.p1text, s.p1, s.p1raw, s.p2text, s.p2,

s.p2raw, s.p3text, s.p3, s.p3raw, s.wait_class_id, s.wait_class#,

s.wait_class, s.wait_time, s.seconds_in_wait, s.state,

s.wait_time_micro, s.time_remaining_micro,

s.time_since_last_wait_micro, s.service_name, s.sql_trace,

s.sql_trace_waits, s.sql_trace_binds, s.sql_trace_plan_stats,

s.session_edition_id, s.creator_addr, s.creator_serial#

FROM v$session s

WHERE ( (s.username IS NOT NULL)

AND (NVL (s.osuser, 'x') <> 'SYSTEM')

AND (s.TYPE <> 'BACKGROUND') AND STATUS='ACTIVE'

)

ORDER BY "PROGRAM";


-- Query 12: To find the SID and OS process id (SPID) of the concurrent job, so that it can be killed (see the sketch after the query).


Select a.inst_id, sid, b.spid 

from gv$session a, gv$process b,apps.fnd_concurrent_requests c where a.paddr = b.addr and request_ID ='31689665'  

AND a.inst_id = b.inst_id and c.os_process_id = a.process;
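A minimal sketch of killing the session found above, using the SID and SERIAL# returned for the request (the substitution variables are placeholders); on RAC the owning instance can be named with the @inst_id form, which is also used in the RAC post later in this blog:

ALTER SYSTEM KILL SESSION '&sid,&serial#' IMMEDIATE;

-- RAC variant, specifying the instance returned by the query:
-- ALTER SYSTEM KILL SESSION '&sid,&serial#,@&inst_id' IMMEDIATE;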


-- Query 13: To find the database SID of the concurrent job

-- We need our concurrent request ID as an input.

-- c.SPID = the operating system process id

-- d.SID  = the Oracle session id


SQL> column process heading "FNDLIBR PID"

SELECT a.request_id, d.sid, d.serial# ,d.osuser,d.process , c.SPID

FROM apps.fnd_concurrent_requests a,

apps.fnd_concurrent_processes b,

v$process c,

v$session d

WHERE a.controlling_manager = b.concurrent_process_id

AND c.pid = b.oracle_process_id

AND b.session_id=d.audsid

AND a.request_id = &Request_ID

AND a.phase_code = 'R';



-- Query 14:To find the currently running SQL after finding SID from Query 12


SELECT SQL_TEXT FROM V$SQLAREA WHERE (ADDRESS, HASH_VALUE) IN

(SELECT SQL_ADDRESS, SQL_HASH_VALUE FROM V$SESSION WHERE SID=11710);


-- Query 15: To find exactly which program and module the session is running

-- If the P2 column keeps changing, the concurrent program is reading different blocks and making progress


select sid,serial#,p2,program,module from gv$session where sid=11710;


-- Query 16: To find whether there is any blocking in a RAC or non-RAC database


select process,sid,blocking_session,BLOCKING_INSTANCE from gv$session

where blocking_session is not null;


-- Query 17: To check the session and concurrent program details for a given SID (here SID=5991)


select sid,serial#,program,module,p2 from gv$session where SID=5991;


-- Query 18: To check the time remaining and elapsed time (in minutes) for long-running jobs


Select round(sofar*100/totalwork,2)"finished(%)",

Sid,

Serial#,

Opname,

Target,

Sofar,

Totalwork,

Units,

(Time_Remaining/60) Time_Rem_Mins,

(Elapsed_Seconds/60) Elapsed_Time_Mins

From gV$session_Longops

where TIME_REMAINING>0;


-- Query 19: To get the location of the trace file for a concurrent request id (e.g. request id 205979614)

SELECT 'Request id: '||request_id , 

'Trace id: '||oracle_Process_id, 

'Trace Flag: '||req.enable_trace, 

'Trace Name: 

'||dest.value||'/'||lower(dbnm.value)||'_ora_'||oracle_process_id||'.trc', 

'Prog. Name: '||prog.user_concurrent_program_name, 

'File Name: '||execname.execution_file_name|| execname.subroutine_name , 

'Status : '||decode(phase_code,'R','Running') 

||'-'||decode(status_code,'R','Normal'), 

'SID Serial: '||ses.sid||','|| ses.serial#, 

'Module : '||ses.module 

from fnd_concurrent_requests req, v$session ses, v$process proc, 

v$parameter dest, v$parameter dbnm, fnd_concurrent_programs_vl prog, 

fnd_executables execname 

where req.request_id = &request 

and req.oracle_process_id=proc.spid(+) 

and proc.addr = ses.paddr(+) 

and dest.name='user_dump_dest' 

and dbnm.name='db_name' 

and req.concurrent_program_id = prog.concurrent_program_id 

and req.program_application_id = prog.application_id 

--- and prog.application_id = execname.application_id 

and prog.executable_application_id = execname.application_id

and prog.executable_id=execname.executable_id;


Note: We can use the above query to diagnose a concurrent job failure by enabling trace on that job first and then running the concurrent job again (see the sketch below).
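One common approach is to enable trace on the request before it runs; a minimal sketch, assuming the request is still pending and that the ENABLE_TRACE flag (the same column Query 19 reports) takes 'Y'/'N' (trace can also be enabled from the Submit Request form):

UPDATE apps.fnd_concurrent_requests
SET    enable_trace = 'Y'
WHERE  request_id = &request_id;

COMMIT;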


-- Query 20: To list the concurrent programs that ran today, giving details about status, log file and output file


SELECT DISTINCT fcp.user_concurrent_program_name,

                fcp.concurrent_program_name,

                fcr.request_id,

                fcr.request_date,

                flv.meaning status,

                fcr.status_code,

                fcr.completion_text,

                fcr.logfile_name,

                fcr.outfile_name,

                fcr.argument_text

  FROM apps.fnd_concurrent_programs_vl fcp,

       apps.fnd_concurrent_requests    fcr,

       apps.fnd_lookup_values          flv

 WHERE fcr.concurrent_program_id = fcp.concurrent_program_id

   AND trunc(fcr.last_update_date) = trunc(SYSDATE)

   AND flv.lookup_code = fcr.status_code

   AND flv.lookup_type = 'CP_STATUS_CODE'

   AND flv.language = 'US'

 ORDER BY fcr.request_date,

          fcr.request_id DESC;



Hope it helps...


Enjoy learning,practicing  and doing Apps DBA tasks...


Best regards,


Rafi.


Note:Check www.orasols.com for the services we offer.

  

Tablespace critical threshold message 

Hi, 

A DBA's main responsibilities also include checking tablespace space regularly. We can check this space at any time with the help of useful views such as DBA_DATA_FILES provided by Oracle, but this becomes tough when a developer loads data and the space suddenly gets almost fully occupied. So I decided to use the script below, which sends me an alert mail whenever a tablespace becomes 85% full (the critical threshold); this is very helpful for making sure tablespaces have enough free space. 


The script is as given below: 


Tablespace critical message when it becomes 85% occupied: 

---------------------------------------------------------- 


#!/bin/ksh 


export PATH=/star/app/oracle/product/11.1.0/db_1/bin:/usr/sbin:/usr/kerberos/bin:/usr/local/bin:/bin:/usr/bin:/home/star/bin 

export ORACLE_BASE=/star/app/oracle 

export LD_LIBRARY_PATH=/star/app/oracle/product/11.1.0/db_1/lib:/lib:/usr/lib 

export LD_LIBRARY_PATH=/ora11g/app/oracle/product/11.1.0.7/lib:/usr/lib:/usr/ccs/bin:/usr/dt/lib 

export ORACLE_HOME=/star/app/oracle/product/11.1.0/db_1 



MAILLIST=`cat /home/star/scripts/email.lst` 

LOGFILE=/tmp/chk_ts_$1.log 


echo $1 


CHKUP=`ps -ef | grep pmon | grep $1 | wc -l` 


if [ "${CHKUP}" -eq 1 ] 

then 

echo "The Pmon Process is running on host" 

else 

echo " ****************************************************************" >> $LOGFILE 

echo " " `date` " Pmon Proces is not found" >> $LOGFILE 

echo " ****************************************************************" >> $LOGFILE 

exit 0 

fi 


export ORACLE_SID=$1 


sqlplus -s "/ as sysdba" <<EOF 

WHENEVER SQLERROR EXIT SQL.SQLCODE 

set echo off 

set trimspool on 

set trimout on 

set verify off 

set feedback off 

column TABLESPACE_NAME format a18 

column "%ocup" format a6 

set lines 200 

spool /tmp/chk_ts_$1.spl 

SELECT total.tablespace_name tablespace_name, 

ROUND (tot / 1024 / 1024) total, 

ROUND ((tot - tot_l) / 1024 / 1024) occupied, 

ROUND (tot_l / 1024 / 1024) remain, 

ROUND (max_l / 1024 / 1024) max_extent, 

ROUND ((tot - tot_l) * 100 / tot) || '%' AS "%ocup" 

FROM (SELECT tablespace_name, SUM (BYTES) tot_l, MAX (BYTES) max_l 

FROM dba_free_space 

GROUP BY tablespace_name 

UNION 

SELECT d.tablespace_name, 

f.bytes_free + f.bytes_used - NVL (p.bytes_used, 0) tot_l, 

0 max_l 

FROM SYS.v_\$temp_space_header f, 

dba_temp_files d, 

SYS.v_\$temp_extent_pool p 

WHERE f.tablespace_name(+) = d.tablespace_name AND f.file_id(+) = d.file_id 

AND p.file_id(+) = d.file_id) libre, 

(SELECT tablespace_name, SUM (BYTES) tot 

FROM dba_data_files 

GROUP BY tablespace_name 

UNION 

(SELECT tablespace_name, SUM (BYTES) tot 

FROM dba_temp_files 

GROUP BY tablespace_name)) total, 

dba_tablespaces dba_t 

WHERE total.tablespace_name = libre.tablespace_name(+) 

AND total.tablespace_name = dba_t.tablespace_name 

AND ROUND (tot_l / 1024 / 1024) < 1024 

AND ROUND ((tot - tot_l) * 100 / tot) > 85 

AND dba_t.tablespace_name NOT IN ('RBS') 

ORDER BY ROUND ((tot - tot_l) * 100 / tot) DESC; 

spo off 

EOF 

if [ -s /tmp/chk_ts_$1.spl ]; then 

cat /tmp/chk_ts_$1.spl | mailx -s "Critical : ($1 on machinename.abcd.com.sg) Missing free space on tablespace of the database $1 -- Immediate Action" $MAILLIST 

echo "---------------------------------------------------------------------------" >> $LOGFILE 

echo `date` >> $LOGFILE 

cat /tmp/chk_ts_$1.spl >> $LOGFILE 

echo "---------------------------------------------------------------------------" >> $LOGFILE 

rm -fr /tmp/chk_ts_$1.spl 

fi 



crsctl command to start and stop crs in Oracle RAC Database 


Hi DBAs,


We basically have the below commands to stop/start CRS in an Oracle RAC database.


crsctl commands to stop and start CRS on a specific node (noderac1 or noderac2):



We need to login with 'root' or specific sudo user having permissions to run the crs commands.


/u01/app/11.2.0.3/grid/bin/crsctl stop crs

/u01/app/11.2.0.3/grid/bin/crsctl disable crs


/u01/app/11.2.0.3/grid/bin/crsctl enable crs

/u01/app/11.2.0.3/grid/bin/crsctl start crs


where /u01/app/11.2.0.3/grid =$GRID_HOME

 

crsctl commands to stop and start the Clusterware on all nodes (noderac1 & noderac2) will be:


/u01/app/11.2.0.3/grid/bin/crsctl stop cluster -all

 /u01/app/11.2.0.3/grid/bin/crsctl disable crs


/u01/app/11.2.0.3/grid/bin/crsctl enable crs

/u01/app/11.2.0.3/grid/bin/crsctl start resource -all


where /u01/app/11.2.0.3/grid =$GRID_HOME



Enjoy doing RAC tasks...


Thanks,


Rafi


How to check prerequisite patches while applying a Database patch using opatch 

Hi DBAs,


A lot of the time we apply database patches using opatch. It is mandatory to know the prerequisite patches required before applying the patch. We can find this out with the steps below.



Step 1:Go to the Directory where patch is copied(PATCH TOP)

>cd /home/oracle/patch/18308717


Step 2:Execute below command

>$ORACLE_HOME/OPatch/opatch prereq CheckConflictAgainstOHWithDetail -ph ./

Oracle Interim Patch Installer version 11.2.0.3.6

Copyright (c) 2013, Oracle Corporation.  All rights reserved.


PREREQ session


Oracle Home       : /u01/app/oracle/product/11.2.0.3/dbhome_1

Central Inventory : /u01/app/oraInventory

   from           : /u01/app/oracle/product/11.2.0.3/dbhome_1/oraInst.loc

OPatch version    : 11.2.0.3.6

OUI version       : 11.2.0.3.0

Log file location : /u01/app/oracle/product/11.2.0.3/dbhome_1/cfgtoollogs/opatch/opatch2014-07-24_17-52-50PM_1.log


Invoking prereq "checkconflictagainstohwithdetail"


Prereq "checkConflictAgainstOHWithDetail" passed.


OPatch succeeded.

node1(TESTDB1)  /home/oracle/patch/18308717

>ls -altr

total 24

-rw-rw-r-- 1 oracle oinstall 5905 May 14 03:35 README.txt

drwxr-xr-x 3 oracle oinstall 4096 May 14 03:35 files

drwxr-xr-x 4 oracle oinstall 4096 May 14 03:35 etc

drwxrw-rw- 3 oracle oinstall 4096 Jul 24 11:30 ..

drwxrwxr-x 4 oracle oinstall 4096 Jul 24 11:30 .

node1(TESTDB1)  /home/oracle/patch/18308717



Step 3:Verify the Log


>vi /u01/app/oracle/product/11.2.0.3/dbhome_1/cfgtoollogs/opatch/opatch2014-07-24_17-52-50PM_1.log


[Jul 24, 2014 5:52:50 PM]    PREREQ session

[Jul 24, 2014 5:52:50 PM]    OPatch invoked as follows: 'prereq CheckConflictAgainstOHWithDetail -ph ./ -invPtrLoc /u01/app/oracle/product/11.2.0.3/dbhome_1/oraInst.loc '

[Jul 24, 2014 5:52:50 PM]    OUI-67077:

                             Oracle Home       : /u01/app/oracle/product/11.2.0.3/dbhome_1

                             Central Inventory : /u01/app/oraInventory

                                from           : /u01/app/oracle/product/11.2.0.3/dbhome_1/oraInst.loc

                             OPatch version    : 11.2.0.3.6

                             OUI version       : 11.2.0.3.0

                             OUI location      : /u01/app/oracle/product/11.2.0.3/dbhome_1/oui

                             Log file location : /u01/app/oracle/product/11.2.0.3/dbhome_1/cfgtoollogs/opatch/opatch2014-07-24_17-52-50PM_1.log

[Jul 24, 2014 5:52:50 PM]    Patch history file: /u01/app/oracle/product/11.2.0.3/dbhome_1/cfgtoollogs/opatch/opatch_history.txt

[Jul 24, 2014 5:52:50 PM]    Invoking prereq "checkconflictagainstohwithdetail"

[Jul 24, 2014 5:52:51 PM]    Patch 18308717 is not subset of any other patch processed till now

[Jul 24, 2014 5:52:51 PM]    Patch ID  18308717

[Jul 24, 2014 5:52:51 PM]    Patches that conflict: [  ]

[Jul 24, 2014 5:52:51 PM]    Checking conflicts for patch: 18308717

[Jul 24, 2014 5:52:51 PM]    Checking conflicts/supersets for patch: 18308717 with patch:18308717

[Jul 24, 2014 5:52:51 PM]    Checking conflicts/supersets for patch: 18308717 with patch:17076717

[Jul 24, 2014 5:52:51 PM]    Checking conflicts/supersets for patch: 18308717 with patch:17296419

[Jul 24, 2014 5:52:51 PM]    Checking conflicts/supersets for patch: 18308717 with patch:16475860

[Jul 24, 2014 5:52:51 PM]    Prereq "checkConflictAgainstOHWithDetail" passed.

[Jul 24, 2014 5:52:51 PM]    Finishing PrereqSession at Thu Jul 24 17:52:51 IST 2014 


Message:Patch 18308717 is not subset of any other patch processed till now 

From the above logs we can confirm that there are no prerequisite patches to be applied before applying 18308717.



Enjoy doing DBA tasks...


Happy DBA tasks..


Thanks,


Rafi 












Sunday, June 29, 2014

Killing multiple sessions in Oracle Database 

Hi,


We often get tasks to kill sessions that are active or inactive, or sessions related to a particular program. When we have one or two sessions we can kill them easily; if we have multiple sessions we can do it as follows:



Step 1:Check the Name of the Database

sqlplus "/as sysdba"


SQL>Select name from v$database;


Step 2: How to kill all inactive sessions in the Database


sqlplus "/as sysdba"


SQL>set heading off


SQL>spool kill12.sql


SELECT 'ALTER SYSTEM KILL SESSION '||''''||sid ||','|| serial#||''''||' immediate;'

 FROM v$session

 WHERE status ='INACTIVE' and type != 'BACKGROUND';


SQL>spool off


then execute the sql script


SQL> @kill12.sql


Note: For a RAC database with more than one instance, we need to follow step 2 on all the Oracle instances (or generate the kill statements from gv$session in one pass, as in the sketch below).
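For RAC, a minimal sketch of a single pass over gv$session, generating statements in the 'sid,serial#,@inst_id' form so they can be executed from any instance (the same form is used in the RAC kill example later in this blog):

SELECT 'ALTER SYSTEM KILL SESSION '||''''||sid||','||serial#||',@'||inst_id||''''||' immediate;'
FROM gv$session
WHERE status ='INACTIVE' and type != 'BACKGROUND';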


How to kill all active sessions in the Database:

sqlplus "/as sysdba"


SQL>set heading off

SQL>spool kill_active.sql


SELECT 'ALTER SYSTEM KILL SESSION '||''''||sid ||','|| serial#||''''||' immediate;'

FROM v$session

WHERE status ='ACTIVE' and type != 'BACKGROUND';


SQL>spool off


Note: For a RAC database with more than one instance, we need to follow the same steps on all the Oracle instances. 



How to kill all ODI sessions in Database:

sqlplus "/as sysdba"


SQL>set heading off

SQL>set lines 1000

SQL>set linesize 2000

SQL>spool kill_active.sql


SELECT 'ALTER SYSTEM KILL SESSION '||''''||sid ||','|| serial#||''''||' immediate;'

FROM v$session

WHERE status ='ACTIVE' AND USERNAME LIKE '%ODI%';


SQL>spool off



Note: For a RAC database with more than one instance, we need to follow the same steps on all the Oracle instances.



How to get the list of Users and Processes running ODI sessions:


SQL> SET LINESIZE 100

COLUMN spid FORMAT A10

COLUMN username FORMAT A10

COLUMN program FORMAT A45


SELECT s.inst_id,s.sid,s.serial#,p.spid, s.username, s.program

 FROM   gv$session s

 JOIN gv$process p ON p.addr = s.paddr AND p.inst_id = s.inst_id

WHERE  s.type != 'BACKGROUND' AND S.USERNAME LIKE '%ODI%';  


How to kill a session blocking a particular object:


 1.Find the tables(objects) which are locked:


SELECT o.owner, o.object_name, o.object_type, o.last_ddl_time, o.status, l.session_id, l.oracle_username, l.locked_mode

FROM dba_objects o, gv$locked_object l

WHERE o.object_id = l.object_id and o.object_name='XX_OBJECT';


2.Killing the session holding the lock:


--Find the serial# for the sessions holding the lock:


SQL> select SERIAL# from v$session where SID=667;


SERIAL#

----------

21091


SQL> alter system kill session '667,21091';


Note: For a RAC database with more than one instance, we need to follow the same steps on all the Oracle instances.


Enjoy DBA tasks...


Happy DBA learning..



Best regards,


Rafi







Wednesday, June 18, 2014

Reorganization of table in Oracle Database 

Hi All,



As we all know, reorganization helps with better disk space management and performance management in an Oracle database, so this task is often done for tables, tablespaces and whole databases. It helps to reduce fragmentation (which affects Oracle's performance).


Below are the Steps I follow for doing reorganization:


Step 1:Check the last analyzed, tables size before Reorganization:



-- Query 1:Check the last Analyzed and number of rows:

SQL> select name from v$database;


NAME

---------

TESTDB


SQL> SELECT  OWNER||' '||TABLE_NAME||' '||LAST_ANALYZED||' '|| PARTITIONED||' '||NUM_ROWS  FROM DBA_TABLES WHERE TABLE_NAME='ORDER_TAB';


OWNER||''||TABLE_NAME||''||LAST_ANALYZED||''||PARTITIONED||''||NUM_ROWS

--------------------------------------------------------------------------------

ORD_SCHEMA ORDER_TAB 06-JUN-14 NO 71374990



--Query 2 :Check the size of the table


SELECT OWNER||' '||SEGMENT_NAME||' '||BYTES/1024/1024||' '||BLOCKS FROM DBA_SEGMENTS WHERE SEGMENT_NAME='ORDER_TAB';


OWNER||''||SEGMENT_NAME||''||BYTES/1024/1024||''||BLOCKS

--------------------------------------------------------------------------------

ORD_SCHEMA ORDER_TAB 41643 5330304



Note: Run top -c and press 1 to see the list of CPUs; with 8 CPUs we can use a parallel degree of up to 16.



ALTER INDEX APPS.XHL_ORDER_TAB_NU5 REBUILD NOLOGGING PARALLEL 12;



Note:CUSTOM_SCHEMA,APPS and ORD_SCHEMA are the important schemas used.


Step 2: Get the list of all the INDEXES associated with the tables used for Reorganization.


set lines 2000

set linesize 2000

select 'alter index ORD_SCHEMA.'||index_name||' rebuild NOLOGGING parallel 12;' from dba_indexes where table_name='ORDER_TAB';



If db is up:

=================


select 'alter index ORD_SCHEMA.'||index_name||' rebuild online NOLOGGING parallel 12;' from dba_indexes where table_name='ORDER_TAB';



INDEXES THAT NEED TO BE REBUILT:

=================================


--alter index ORD_SCHEMA.ORDER_TAB_NU17 rebuild online parallel 12;

--alter index ORD_SCHEMA.ORDER_TAB_F2 rebuild online parallel 12;

--alter index ORD_SCHEMA.ORDER_TAB_N18 rebuild online parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU3 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N10 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N11 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N12 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_U1 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N6 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_U2 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU1 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N9 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU4 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N1 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU2 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N7 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N8 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N2 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N3 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N4 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N5 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU5 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_R1 rebuild parallel 12;




Step 3: Make sure to set the indexes back to NOPARALLEL and LOGGING after they are rebuilt:

Make them NOPARALLEL to release the CPU channels


select 'alter index ORD_SCHEMA.'||index_name||' noparallel logging;' from dba_indexes where table_name='ORDER_TAB';



Step 3:Move the tables to reduce fragmentation.


SQL> spool move_table_10June2014.out

SQL> ALTER TABLE ORD_SCHEMA.ORDER_TAB MOVE;

SQL> spool off


Table altered.




Step 4: Rebuild indexes online using a VNC session (vncserver)

We need to rebuild the indexes from a VNC session, or run them as shell scripts with nohup, as this operation takes time.


spool rebuild_10Jun2014.out

alter index ORD_SCHEMA.ORDER_TAB_NU17 rebuild parallel 12;

alter index ORD_SCHEMA.ORDER_TAB_F2 rebuild parallel 12;

alter index ORD_SCHEMA.ORDER_TAB_N18 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU3 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N10 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N11 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N12 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_U1 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N6 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_U2 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU1 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N9 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU4 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N1 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU2 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N7 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N8 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N2 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N3 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N4 rebuild parallel 12;

alter index ORD_SCHEMA.OE_ORDER_HEADERS_N5 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_NU5 rebuild parallel 12;

alter index ORD_SCHEMA.XHL_ORDER_TAB_R1 rebuild parallel 12;

spool off



Step 5: Check the index status again after the rebuild

Check the index status again after the rebuild; if any indexes are in UNUSABLE status for some reason, please rebuild them again and make sure they come back to VALID status (a statement generator for this is sketched at the end of this step).




SQL> select owner, index_name,STATUS from dba_indexes where status='UNUSABLE' and table_name='ORDER_TAB';


OWNER                          INDEX_NAME                     STATUS

------------------------------ ------------------------------ --------

APPS                           ORDER_TAB_NU17      UNUSABLE

ORD_SCHEMA                            ORDER_TAB_F2        UNUSABLE

ORD_SCHEMA                            XHL_ORDER_TAB_NU3   UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N10           UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N11           UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N12           UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_U1            UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N6            UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_U2            UNUSABLE

ORD_SCHEMA                            XHL_ORDER_TAB_NU1   UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N9            UNUSABLE


OWNER                          INDEX_NAME                     STATUS

------------------------------ ------------------------------ --------

CUSTOM_SCHEMA                           XHL_ORDER_TAB_NU4   UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N1            UNUSABLE

ORD_SCHEMA                            XHL_ORDER_TAB_NU2   UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N7            UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N8            UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N2            UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N3            UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N4            UNUSABLE

ORD_SCHEMA                            OE_ORDER_HEADERS_N5            UNUSABLE

APPS                           XHL_ORDER_TAB_NU5   UNUSABLE

ORD_SCHEMA                            XHL_ORDER_TAB_R1    UNUSABLE


22 rows selected.



To check unusable indexes for table:

==========================================


SQL> select owner, index_name,STATUS from dba_indexes where status='UNUSABLE' and table_name='ORDER_TAB';


no rows selected


SQL>
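If any rows do show up as UNUSABLE, a statement generator in the same style as the earlier ones can be used to rebuild them; a minimal sketch for the ORDER_TAB table:

select 'alter index '||owner||'.'||index_name||' rebuild parallel 12;'
from dba_indexes
where status='UNUSABLE' and table_name='ORDER_TAB';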





Step 6: Make the indexes NOPARALLEL again and release the channels:


select 'alter index ORD_SCHEMA.'||index_name||' noparallel logging;' from dba_indexes where table_name='ORDER_TAB';






Step 7: Check the status of the indexes after the rebuild

Check the status of the indexes after the rebuild and make sure they are in VALID state, or else rebuild them again.

SELECT INDEX_NAME||' '||INDEX_TYPE||' '||STATUS

FROM DBA_INDEXES WHERE TABLE_NAME='ORDER_TAB';



Step 8: Run gather stats for the table in a VNC session:

We gather statistics so the optimizer can choose the best possible execution plan.


spool gather_stats_tables_TESTDB.out




sqlplus "/as sysdba"


SQL>


Begin DBMS_STATS.GATHER_TABLE_STATS(

ownname => 'ORD_SCHEMA',

tabname => 'ORDER_TAB',

degree => 10,

cascade => TRUE,

METHOD_OPT => 'FOR ALL COLUMNS SIZE AUTO',

estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE);

END;

/



Step 9: Verify the number of rows, statistics and table size after reorganization


SQL> select name from v$database;


NAME

---------

TESTDB


SQL> SELECT  OWNER||' '||TABLE_NAME||' '||LAST_ANALYZED||' '|| PARTITIONED||' '||NUM_ROWS  FROM DBA_TABLES WHERE TABLE_NAME='ORDER_TAB';


OWNER||''||TABLE_NAME||''||LAST_ANALYZED||''||PARTITIONED||''||NUM_ROWS

--------------------------------------------------------------------------------

ORD_SCHEMA ORDER_TAB 11-JUN-14 NO 71348700


SQL> SELECT OWNER||' '||SEGMENT_NAME||' '||BYTES/1024/1024||' '||BLOCKS FROM DBA_SEGMENTS WHERE SEGMENT_NAME='ORDER_TAB';


OWNER||''||SEGMENT_NAME||''||BYTES/1024/1024||''||BLOCKS

--------------------------------------------------------------------------------

ORD_SCHEMA ORDER_TAB 42534 5444352





Hope it helps.Enjoy DBA tasks...




Thanks,


Rafi


Saturday, December 21, 2013

Reorganization of Tables and Indexes in Oracle Database 

Hi DBAs,


Reorganization is a very useful and important task DBAs perform in order to reduce the space used by blocks, and it also helps in improving the performance of the Oracle database. It also helps to reduce fragmentation.


There are 3 ways to do this:

1. Export/Import

2. ALTER TABLE ... MOVE

3. CTAS method (CREATE TABLE table_name2 AS SELECT * FROM table_name1)

I followed option 2 of the above; for completeness, a sketch of option 3 (CTAS) is shown below.
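A minimal sketch of the CTAS approach (option 3), using a hypothetical table MY_TAB; the table is copied into a new segment and swapped in by renaming, and indexes, grants, constraints and triggers must be recreated afterwards:

CREATE TABLE my_tab_new AS SELECT * FROM my_tab;

-- Swap the tables once the copy is verified (recreate indexes/grants/constraints on the new table)
ALTER TABLE my_tab RENAME TO my_tab_old;
ALTER TABLE my_tab_new RENAME TO my_tab;

-- Drop the old copy only after validation
-- DROP TABLE my_tab_old PURGE;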


 Below are the Steps I follow for doing reorganization:


Step 1:Check the last analyzed, tables size before Reorganization

Query 1:Check the last Analyzed and number of rows

SELECT  OWNER||' '||TABLE_NAME||' '||LAST_ANALYZED||' '|| PARTITIONED||' '||NUM_ROWS  FROM DBA_TABLES WHERE TABLE_NAME IN('PS_TEST_WL','PSWORKLIST','PS_TEST_USERINST','PS_TEST_STEPINST','PS_TEST_TAUTH_AW','PS_TEST_ADV_AW','PS_TEST_SHEET_AW');

Query 2:Check the size of the table before Reorganization

SQL>  SELECT OWNER||' '||SEGMENT_NAME||' '||BYTES/1024/1024 FROM DBA_SEGMENTS

     WHERE SEGMENT_NAME IN('PS_TEST_WL','PSWORKLIST','PS_TEST_USERINST','PS_TEST_STEPINST','PS_TEST_TAUTH_AW','PS_TEST_ADV_AW','PS_TEST_SHEET_AW');


Step 2: Get the list of all the INDEXes associated with the tables used for Reorganization.

SET LINES 2000

SET LINESIZE 2000

SQL> select  OWNER||' '||table_name||' '||INDEX_name from DBA_INDEXES where table_name in ('PS_TEST_WL','PSWORKLIST','PS_TEST_USERINST','PS_TEST_STEPINST','PS_TEST_TAUTH_AW','PS_TEST_ADV_AW','PS_TEST_SHEET_AW');


OWNER||''||TABLE_NAME||''||INDEX_NAME

------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

SYSADM PS_TEST_WL PSATEST_WL

SYSADM PS_TEST_WL PSBTEST_WL

SYSADM PS_TEST_WL PS_TEST_WL

SYSADM PS_TEST_USERINST PSATEST_USERINST

SYSADM PS_TEST_USERINST PSBTEST_USERINST

SYSADM PS_TEST_USERINST PSCTEST_USERINST

SYSADM PS_TEST_USERINST PSDTEST_USERINST

SYSADM PS_TEST_USERINST PSETEST_USERINST

SYSADM PS_TEST_USERINST PS_TEST_USERINST

SYSADM PS_TEST_STEPINST PSBTEST_STEPINST

SYSADM PS_TEST_STEPINST PSCTEST_STEPINST


OWNER||''||TABLE_NAME||''||INDEX_NAME

------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

SYSADM PS_TEST_STEPINST PSDTEST_STEPINST

SYSADM PS_TEST_STEPINST PSETEST_STEPINST

SYSADM PS_TEST_STEPINST PSFTEST_STEPINST

SYSADM PS_TEST_STEPINST PSATEST_STEPINST

SYSADM PS_TEST_STEPINST PS_TEST_STEPINST

SYSADM PSWORKLIST PS_PSWORKLIST

SYSADM PSWORKLIST PS0PSWORKLIST

SYSADM PSWORKLIST PSAPSWORKLIST

SYSADM PSWORKLIST PSBPSWORKLIST

SYSADM PS_TEST_TAUTH_AW PS_TEST_TAUTH_AW

SYSADM PS_TEST_TAUTH_AW SYS_IL0000025105C00012$$


OWNER||''||TABLE_NAME||''||INDEX_NAME

------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

SYSADM PS_TEST_SHEET_AW PSATEST_SHEET_AW

SYSADM PS_TEST_SHEET_AW SYS_IL0000024994C00012$$

SYSADM PS_TEST_SHEET_AW PS_TEST_SHEET_AW

SYSADM PS_TEST_ADV_AW SYS_IL0000024456C00012$$

SYSADM PS_TEST_ADV_AW PS_TEST_ADV_AW


27 rows selected.



Step 3:Move the tables to reduce fragmentation.


SQL> spool move_tables.out

SQL> ALTER TABLE PS_TEST_WL MOVE;


Table altered.


SQL> ALTER TABLE PS_TEST_USERINST MOVE;


Table altered.


SQL> ALTER TABLE PS_TEST_STEPINST MOVE;


Table altered.


SQL> ALTER TABLE PSWORKLIST MOVE;


Table altered.


SQL> ALTER TABLE PS_TEST_TAUTH_AW MOVE;


Table altered.


SQL> ALTER TABLE PS_TEST_SHEET_AW MOVE;


Table altered.


SQL> ALTER TABLE PS_TEST_ADV_AW MOVE;


Table altered.


SQL> spool off



Step 4: Rebuild indexes online


spool rebuild.out


ALTER INDEX PSATEST_WL REBUILD ONLINE;

ALTER INDEX PSBTEST_WL REBUILD ONLINE;

ALTER INDEX PS_TEST_WL REBUILD ONLINE;

ALTER INDEX PSATEST_USERINST REBUILD ONLINE;

ALTER INDEX PSBTEST_USERINST REBUILD ONLINE;

ALTER INDEX PSCTEST_USERINST REBUILD ONLINE;

ALTER INDEX PSDTEST_USERINST REBUILD ONLINE;

ALTER INDEX PSETEST_USERINST REBUILD ONLINE;

ALTER INDEX PS_TEST_USERINST REBUILD ONLINE;

ALTER INDEX PSBTEST_STEPINST REBUILD ONLINE;

ALTER INDEX PSCTEST_STEPINST REBUILD ONLINE;

ALTER INDEX PSDTEST_STEPINST REBUILD ONLINE;

ALTER INDEX PSETEST_STEPINST REBUILD ONLINE;

ALTER INDEX PSFTEST_STEPINST REBUILD ONLINE;

ALTER INDEX PSATEST_STEPINST REBUILD ONLINE;

ALTER INDEX PS_TEST_STEPINST REBUILD ONLINE;

ALTER INDEX PSBPSWORKLIST REBUILD ONLINE;

ALTER INDEX PS0PSWORKLIST REBUILD ONLINE;

ALTER INDEX PSAPSWORKLIST REBUILD ONLINE;

ALTER INDEX PS_TEST_TAUTH_AW REBUILD ONLINE;

ALTER INDEX SYS_IL0000025105C00012$$ REBUILD ONLINE;

ALTER INDEX PSATEST_SHEET_AW REBUILD ONLINE;

ALTER INDEX SYS_IL0000024994C00012$$ REBUILD ONLINE;

ALTER INDEX PSATEST_SHEET_AW REBUILD ONLINE;

ALTER INDEX SYS_IL0000024994C00012$$ REBUILD ONLINE;

ALTER INDEX PS_TEST_ADV_AW REBUILD ONLINE;

spool off


Step 5: Check the status of the indexes after the rebuild

Check the status of the indexes after the rebuild and make sure they are in VALID state, or else rebuild them again.

SELECT INDEX_NAME||' '||INDEX_TYPE||' '||STATUS

FROM USER_INDEXES WHERE TABLE_NAME IN('PS_TEST_WL','PSWORKLIST','PS_TEST_USERINST','PS_TEST_STEPINST','PS_TEST_TAUTH_AW','PS_TEST_ADV_AW','PS_TEST_SHEET_AW');


Step 6:Run the gather stats for the tables.

spool gather_stats_tables_TESTDB.out


BEGIN

  DBMS_STATS.gather_table_stats(

    'SYSADM',

    'PS_TEST_WL',

    method_opt =>'for all columns size auto');

END;

/


BEGIN

  DBMS_STATS.gather_table_stats(

    'SYSADM',

    'PSWORKLIST',

    method_opt => 'for all columns size auto');

END;

/


BEGIN

  DBMS_STATS.gather_table_stats(

    'SYSADM',

    'PS_TEST_USERINST',

    method_opt => 'for all columns size auto');

END;

/


BEGIN

  DBMS_STATS.gather_table_stats(

    'SYSADM',

    'PS_TEST_STEPINST',

    method_opt => 'for all columns size auto');

END;

/


BEGIN

  DBMS_STATS.gather_table_stats(

    'SYSADM',

    'PS_TEST_TAUTH_AW',

    method_opt => 'for all columns size auto');

END;

/


BEGIN

  DBMS_STATS.gather_table_stats(

    'SYSADM',

    'PS_TEST_ADV_AW',

    method_opt => 'for all columns size auto');

END;

/

BEGIN

  DBMS_STATS.gather_table_stats(

    'SYSADM',

    'PS_TEST_SHEET_AW',

    method_opt => 'for all columns size auto');

END;

/


spool off


Step 7: Verify the statistics and table sizes after reorganization


SQL>  SELECT  OWNER||' '||TABLE_NAME||' '||LAST_ANALYZED||' '|| PARTITIONED||' '||NUM_ROWS  FROM DBA_TABLES

  2  WHERE TABLE_NAME IN('PS_TEST_WL','PSWORKLIST','PS_TEST_USERINST','PS_TEST_STEPINST','PS_TEST_TAUTH_AW','PS_TEST_ADV_AW','PS_TEST_SHEET_AW');


OWNER||''||TABLE_NAME||''||LAST_ANALYZED||''||PARTITIONED||''||NUM_ROWS

----------------------------------------------------------------------------------------------------

SYSADM PS_TEST_ADV_AW 28-NOV-13 NO 1716

SYSADM PS_TEST_SHEET_AW 28-NOV-13 NO 197699

SYSADM PS_TEST_TAUTH_AW 28-NOV-13 NO 128592

SYSADM PSWORKLIST 28-NOV-13 NO 936531

SYSADM PS_TEST_STEPINST 28-NOV-13 NO 292227

SYSADM PS_TEST_USERINST 28-NOV-13 NO 5111123

SYSADM PS_TEST_WL 28-NOV-13 NO 652895


7 rows selected.


SQL>  SELECT OWNER||' '||SEGMENT_NAME||' '||BYTES/1024/1024 FROM DBA_SEGMENTS

  2   WHERE SEGMENT_NAME IN('PS_TEST_WL','PSWORKLIST','PS_TEST_USERINST','PS_TEST_STEPINST','PS_TEST_TAUTH_AW','PS_TEST_ADV_AW','PS_TEST_SHEET_AW');


OWNER||''||SEGMENT_NAME||''||BYTES/1024/1024

----------------------------------------------------------------------------------------------------

SYSADM PS_TEST_ADV_AW .25

SYSADM PS_TEST_ADV_AW .125

SYSADM PS_TEST_SHEET_AW 46

SYSADM PS_TEST_SHEET_AW 31

SYSADM PS_TEST_TAUTH_AW 18

SYSADM PS_TEST_TAUTH_AW 4

SYSADM PSWORKLIST 168

SYSADM PS_TEST_STEPINST 18

SYSADM PS_TEST_STEPINST 19

SYSADM PS_TEST_USERINST 365

SYSADM PS_TEST_USERINST 93


OWNER||''||SEGMENT_NAME||''||BYTES/1024/1024

----------------------------------------------------------------------------------------------------

SYSADM PS_TEST_WL 88

SYSADM PS_TEST_WL 64


13 rows selected.


Enjoy DBA tasks and Performance tuning.



Best regards,


Rafi.





Sunday, January 20, 2013

WARNING: inbound connection timed out ORA-3136 

Hi,


Recently I was troubleshooting this error and finally resolved it. First, let me explain what exactly it means, the cause of this error, possible methods of diagnosis, and finally the resolution.


INBOUND_CONNECT_TIMEOUT: 

INBOUND_CONNECT_TIMEOUT is the parameter used to specify the time, in seconds, for the client to complete its connect request to the listener after the network connection has been established.

In other words, it is the default timeout for a new connection to be established. On the server side this setting is called SQLNET.INBOUND_CONNECT_TIMEOUT.


If the listener does not receive the client request in the time specified, then it terminates the connection. Check the listener.log for details.


In Oracle 9i the value is unlimited; from 10g onwards the default is 60 seconds, and in Oracle 11g the default is also 60 seconds.


Causes:


1) Some malicious client connections.

2) The connection takes a long time to complete.

3) The database is heavily loaded and cannot process the request in the allotted time.


Diagnosis:


1) Check the alert log file and see where the connection comes from.

2) Check that the listener is up and running.

3) Ping the server and make sure tnsping is working.


Resolution:


To identify the listener name and ORACLE_HOME,we can use the below command.


$ ps -eaf|grep tns

oracle    1643     1  0  2012 ?        04:51:49 /data01/home/oracle/product/10.2.0/bin/tnslsnr LISTENER -inherit

oracle   31682 16935  0 04:54 pts/0    00:00:00 grep tns



Check if the sqlnet.ora file exists in the $ORACLE_HOME/network/admin path; if not, create one.


Adjust the values for INBOUND_CONNECT_TIMEOUT in both the listener.ora and the server-side sqlnet.ora, then reload the listener configuration:


sqlnet.ora: SQLNET.INBOUND_CONNECT_TIMEOUT=60

listener.ora: INBOUND_CONNECT_TIMEOUT_LISTENER=60

where:Listener_name=>LISTENER

Default value for these is 60 seconds.


Below are the details:


LSNRCTL>set  INBOUND_CONNECT_TIMEOUT_LISTENER=60

[oracle@hostname admin]$ cat sqlnet.ora

#SQLNET.ORA Network Configuration File: /u01/home/oracle/product/10.2.0/network/admin/sqlnet.ora

#To eliminate inbound connection timeout

SQLNET.INBOUND_CONNECT_TIMEOUT=60


#sqlnet.authentication_services = (NONE)


#SQLNET.EXPIRE_TIME = 0


#SQLNET.ENCRYPTION_SERVER = requested


#SQLNET.ENCRYPTION_CLIENT = requested


NAMES.DIRECTORY_PATH= (TNSNAMES,hostname)

[oracle@s259722ch3el19 admin]$


[oracle@s259722ch3el19 admin]$ lsnrctl reload LISTENER


LSNRCTL for Linux: Version 10.2.0.1.0 - Production on 19-JAN-2013 05:01:25


Copyright (c) 1991, 2005, Oracle.  All rights reserved.


Connecting to (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.48.1.511)(PORT=1521)))

The command completed successfully 


Keep monitoring; the alerts have stopped for now.


If the alerts continue to come, then you have to increase the value of INBOUND_CONNECT_TIMEOUT as follows:


sqlnet.ora: SQLNET.INBOUND_CONNECT_TIMEOUT=300

listener.ora: INBOUND_CONNECT_TIMEOUT_LISTENER=300

 where:Listener_name=>LISTENER


Hope it helps.


Happy DBA tasks and troubleshooting.



Best regards,


Rafi. 


Saturday, November 10, 2012

Oracle RAC 11gR2 crsctl all resource start and stop 



Hi DBAs,


Oracle RAC 11gR2 came up with new crsctl commands that can be used to start a particular resource (such as ASM, a listener, or other grid services), or to start and stop all the resources at once.


Below are the commands to do:



crsctl start resource <resource_name>

    


crsctl start resource -all


crsctl stop resource <resource_name>

    


crsctl stop resource -all



Reference:http://docs.oracle.com/cd/E11882_01/rac.112/e16794/crsref.htm#CHDFEEEG




Hope it helps ...


Enjoy RAC DBA learning....



Best regards,



Rafi.


Monday, September 24, 2012

Killing session in Oracle RAC Database 

Hi DBAs,

Whenever we find a locking issue in a RAC database and developers request us to kill the session that is blocking and creating the lock, please make sure you are killing the correct session. The steps below can be very useful.


Step 1: Find the blocking session

SQL> SET LINES 1000


SQL> select process,sid,blocking_session,BLOCKING_INSTANCE from gv$session

where blocking_session is not null;


PROCESS                         SID BLOCKING_SESSION BLOCKING_INSTANCE

------------------------ ---------- ---------------- -----------------

24822                           1139             5366                 1



1 rows selected.



Step 2:Check the Program which is blocking


SELECT s.inst_id,

       s.sid,

       s.serial#,

       p.spid,

       s.username,

       s.program

FROM   gv$session s

       JOIN gv$process p ON p.addr = s.paddr AND p.inst_id = s.inst_id

where s.sid=1139;


So, SID 1139 belongs to the CALC program.


Step 3:Find the Session details


SQL> select sid,serial#, INST_ID from gv$session where sid=' 1139';


       SID    SERIAL#    INST_ID

---------- ---------- ----------

      1139      56959          3

      

Step 4:Kill the session immediately


SQL> alter system kill session '1139,56959,@3'  immediate;


System altered.




Hope it helps...


Enjoy Oracle RAC DBA learning...



Best regards,


Rafi.









Sunday, August 26, 2012

Creating Oracle 11g Database manually in 11 steps in Unix-based Operating system 

Hi,

This is one of the most basic tasks for an Oracle DBA, and it also helps us to understand how the Oracle database works. Creating a database in 11g is very simple. In 10g you need to create the additional directories bdump, cdump and udump instead of the diagnostic dump directory. Below are the steps:



Step 1:Create Directory structure with Oracle ownership and permission as below:


[oracle@localhost ~]$ cd /u03


[oracle@localhost u03]$ mkdir testdb


[oracle@localhost u03]$ chmod -R 777 /u03/testdb/*


[oracle@localhost u03]$ chown -R oracle:oinstall /u03/testdb/*


[oracle@localhost testdb]$ mkdir adump diag flash_recovery_area


[oracle@localhost u03]$ chmod -R 777 /u03/testdb/*


[oracle@localhost u03]$ chown -R oracle:oinstall /u03/testdb/*



Step 2:Create Parameter file in $ORACLE_HOME/dbs location:


[oracle@localhost testdb]$ cd $ORACLE_HOME/dbs


[oracle@localhost dbs]$ vi init_testdb.ora


db_name='testdb'

memory_target=1G

processes = 150

audit_file_dest='/u03/testdb/adump'

audit_trail ='db'

db_block_size=8192

db_domain=''

db_recovery_file_dest='/u03/testdb/flash_recovery_area'

db_recovery_file_dest_size=2G

diagnostic_dest='/u03/testdb/diag'

dispatchers='(PROTOCOL=TCP) (SERVICE=testdb)'

open_cursors=300

remote_login_passwordfile='EXCLUSIVE'

undo_tablespace='UNDOTBS1'

# You may want to ensure that control files are created on separate physical

# devices

control_files = (/u03/testdb/ora_control1.ctl,/u03/testdb/ora_control2.ctl)

compatible ='11.2.0'



Step 3:Prepare Create Database script :


[oracle@localhost u03]$ cd /u03/testdb/


[oracle@localhost testdb]$ vi createdb_shaik.sql


CREATE DATABASE testdb

MAXLOGFILES 5

MAXLOGMEMBERS 5

MAXDATAFILES 100

MAXINSTANCES 1

LOGFILE

GROUP 1 '/u03/testdb/redo1.log' SIZE 10M,

GROUP 2 '/u03/testdb/redo2.log' SIZE 10M,

GROUP 3 '/u03/testdb/redo3.log' SIZE 10M

DATAFILE

'/u03/testdb/system.dbf' size 200M REUSE

sysaux datafile '/u03/testdb/sysaux.dbf' size 100m

undo tablespace UNDOTBS1

datafile '/u03/testdb/undo1.dbf' size 100m

DEFAULT TEMPORARY TABLESPACE temp1

TEMPFILE '/u03/testdb/temp01.dbf'

SIZE 100M REUSE

CHARACTER SET AL32UTF8

;

:wq


Step 4:Set the Oracle ENVIRONMENT and SID of Database in the Operating System:


[root@localhost testdb]# su - oracle


[oracle@localhost ~]$ vi .bash_profile


# .bash_profile


# Get the aliases and functions

if [ -f ~/.bashrc ]; then

        . ~/.bashrc

fi


# User specific environment and startup programs


PATH=$PATH:$HOME/bin


export PATH

export ORACLE_BASE=/u01/app/oracle

export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/dbhome_1

export PATH=$ORACLE_HOME/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib

export ORACLE_SID=orcl

export TNS_ADMIN=/u01/app/oracle/product/11.2.0/dbhome_1/network/admin


:wq

[oracle@localhost ~]$ . .bash_profile

     This will set the Oracle Environment variables in Unix-based operating system.


[oracle@localhost ~]$export ORACLE_SID=testdb

     This will set the SID of the current Database in Unix-based operating system.



Step 5:Create the Password file.


[oracle@localhost ~]$orapwd file=$ORACLE_HOME/dbs/orapwtestdb password=Oracle entries=10



Step 6:Create server parameter file.


[oracle@localhost dbs]$ sqlplus "/as sysdba"


SQL*Plus: Release 11.2.0.1.0 Production on Wed Mar 28 14:08:02 2012


Copyright (c) 1982, 2009, Oracle.  All rights reserved.


Connected to an idle instance.


SQL>create spfile from pfile='$ORACLE_HOME/dbs/init_testdb.ora';



step 7:Start the Database in nomount State.


SQL> startup nomount

ORACLE instance started.


Total System Global Area 1071333376 bytes

Fixed Size                  1341312 bytes

Variable Size             620759168 bytes

Database Buffers          444596224 bytes

Redo Buffers                4636672 bytes

SQL> select status from v$instance;


STATUS

------------

STARTED


Note:Common issue memory_target not supported,refer the below link for resolving:


http://rafioracledba.blogspot.in/2011/06/ora-00845-memorytarget-not-supported-on.html


Step 8:Execute Create Database script created in Step 3


SQL> @/u03/testdb/createdb_shaik.sql


Database created


Step 9:Execute the catalog.sql,catproc.sql and pupbld.sql scripts:

a) catalog.sql => Creates the data dictionary views

b) catproc.sql => Creates the necessary PL/SQL procedures, functions and packages.

c) pupbld.sql  => Creates the SQL*Plus product user profile tables.



So our database is created. Now just run the catalog.sql, catproc.sql and pupbld.sql scripts.

We will find catalog.sql and catproc.sql under the $ORACLE_HOME/rdbms/admin path and pupbld.sql under the $ORACLE_HOME/sqlplus/admin path.


SQL> @$ORACLE_HOME/rdbms/admin/catalog.sql

SQL> @$ORACLE_HOME/rdbms/admin/catproc.sql

conn system/manager


SQL>@$ORACLE_HOME/sqlplus/admin/pupbld.sql


SQL> alter user system identified by manager;


User altered.


SQL> conn system

Enter password: 

Connected.

SQL> @$ORACLE_HOME/sqlplus/admin/pupbld.sql

DROP SYNONYM PRODUCT_USER_PROFILE

             *

ERROR at line 1:

ORA-01434: private synonym to be dropped does not exist



  DATE_VALUE FROM PRODUCT_USER_PROFILE

                  *

ERROR at line 3:

ORA-00942: table or view does not exist



DROP TABLE PRODUCT_USER_PROFILE

           *

ERROR at line 1:

ORA-00942: table or view does not exist



ALTER TABLE SQLPLUS_PRODUCT_PROFILE ADD (LONG_VALUE LONG)

*

ERROR at line 1:

ORA-00942: table or view does not exist




Table created.


DROP TABLE PRODUCT_PROFILE

           *

ERROR at line 1:

ORA-00942: table or view does not exist



DROP VIEW PRODUCT_PRIVS

*

ERROR at line 1:

ORA-00942: table or view does not exist




View created.



Grant succeeded.


DROP PUBLIC SYNONYM PRODUCT_PROFILE

                    *

ERROR at line 1:

ORA-01432: public synonym to be dropped does not exist




Synonym created.


DROP SYNONYM PRODUCT_USER_PROFILE

             *

ERROR at line 1:

ORA-01434: private synonym to be dropped does not exist




Synonym created.


DROP PUBLIC SYNONYM PRODUCT_USER_PROFILE

                    *

ERROR at line 1:

ORA-01432: public synonym to be dropped does not exist


Synonym created.


Note: The ORA-00942, ORA-01432 and ORA-01434 errors above are expected on the first run of pupbld.sql, since the script tries to drop objects that do not yet exist.


Step 10: Verify the dictionary views created.



SQL> select name from v$database;
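
A few additional checks can be run at this point, assuming the dictionary scripts completed cleanly:

SQL> select comp_name, status from dba_registry;

SQL> select count(*) from dba_objects where status = 'INVALID';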



Step 11: Change the Database from NOARCHIVELOG to ARCHIVELOG mode

Changing to archive log mode:


--------------------------------



SQL> archive log list

Database log mode              No Archive Mode

Automatic archival             Disabled

Archive destination            USE_DB_RECOVERY_FILE_DEST

Oldest online log sequence     1

Current log sequence           3

SQL> shut immediate

Database closed.

Database dismounted.

ORACLE instance shut down.

SQL> startup mount

ORACLE instance started.


Total System Global Area 1071333376 bytes

Fixed Size                  1341312 bytes

Variable Size             620759168 bytes

Database Buffers          444596224 bytes

Redo Buffers                4636672 bytes

Database mounted.

SQL> alter database archivelog;


Database altered.


SQL> select status from v$instance;


STATUS

------------

MOUNTED


SQL> alter database open;


Database altered.


SQL> select status from v$instance;


STATUS

------------

OPEN


SQL> archive log list

Database log mode              Archive Mode

Automatic archival             Enabled

Archive destination            USE_DB_RECOVERY_FILE_DEST

Oldest online log sequence     1

Next log sequence to archive   3

Current log sequence           3


This completes manual Database creation on a Unix-based operating system.


Enjoy learning Oracle DBA....



Best regards,


Rafi.

Sunday, July 29, 2012

Working with FLASHBACK in oracle Database 




FLASHBACK in Oracle DATABASE:

=============================


  Flashback means going back to a previous state; in the case of an Oracle Database, it lets us take the database back to a previous point in time.


Advantages and Applications of Flashback:

--------------------------------------------


1)Useful in recovering lost data.

2)Useful in saving time and system resources.

3)Useful for performing data testing.


Disadvantages:

---------------


1)Additional Overhead on Database



prerequisites:

---------------


prerequisite 1: Configure FRA(Flash Recovery Area)


    SQL> alter system set db_recovery_file_dest_size=1g;


    System altered.


    SQL> alter system set db_recovery_file_dest='/oracle/test_flash';


    System altered.


Note: If it is a RAC database the FRA should point to the shared storage.

Ensure you estimate appropriately db_recovery_file_dest_size and have enough space.

 STORAGE_SIZE column from V$RESTORE_POINT can help you for that.
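
A quick way to check the current FRA location, size and usage (a small sketch):

SQL> select name, space_limit/1024/1024 mb_limit, space_used/1024/1024 mb_used from v$recovery_file_dest;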


prerequisite 2: The Oracle database should be in ARCHIVELOG mode


SQL> archive log list

Database log mode              Archive Mode

Automatic archival             Enabled

Archive destination            USE_DB_RECOVERY_FILE_DEST

Oldest online log sequence     70

Next log sequence to archive   73

Current log sequence           73


Note: The below steps can be used to switch the Database to archivelog mode.


    [oracle@orclbox ~]$ sqlplus /nolog


    SQL*Plus: Release 11.2.0.2.0 Production


    Copyright (c) 1982, 2010, Oracle.  All rights reserved.


    SQL> conn / as sysdba

    Connected.

    SQL> alter system set log_archive_dest_1='location=/oracle/test_flash';


    System altered.


    SQL> shutdown immediate

    Database closed.

    Database dismounted.

    ORACLE instance shut down.

    SQL> startup mount

    ORACLE instance started.


    Total System Global Area 1653518336 bytes

    Fixed Size                  2227032 bytes

    Variable Size            1275069608 bytes

    Database Buffers          369098752 bytes

    Redo Buffers                7122944 bytes

    Database mounted.

    SQL> alter database archivelog;


    Database altered.


    SQL> alter database open;


    Database altered.


    SQL> archive log list

    Database log mode              Archive Mode

    Automatic archival             Enabled

    Archive destination            /oracle/test_flash

    Oldest online log sequence     2

    Next log sequence to archive   4

    Current log sequence           4

    SQL>



prerequisite 3: Turn on Flashback in the Database


SQL> alter database flashback on;


Database altered.


SQL> select log_mode,flashback_on from v$database;


LOG_MODE     FLASHBACK_ON


------------ ------------------


ARCHIVELOG   YES


Note: To turn flashback off, use the below statement.


SQL> alter database flashback off;


Database altered.


SQL> select log_mode,flashback_on from v$database;


LOG_MODE     FLASHBACK_ON


------------ ------------------


ARCHIVELOG   NO
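
Note: The size of the flashback window is controlled by the db_flashback_retention_target parameter (in minutes). A minimal example, assuming roughly a one-day window is desired:

SQL> alter system set db_flashback_retention_target=1440;

System altered.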




Example 1: Let us consider an example of restoring a table to a particular SCN (system change number):


Below I'm creating a 'test' user with some test data, then I will check the SCN after committing the changes, then I will delete the data, and finally I will restore that table using the flashback-to-SCN technique.


SQL> create user test identified by test;


User created.


SQL> grant connect,resource to test;


Grant succeeded
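
The flashback_test table is assumed to have been created beforehand, for example:

SQL> create table test.flashback_test(col1 number);

Table created.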



 SQL> insert into test.flashback_test values(1);


    1 row created.


    SQL> insert into test.flashback_test values(2);


    1 row created.


    SQL> insert into test.flashback_test values(3);


    1 row created.


    SQL> commit;


    SQL> select current_scn, scn_to_timestamp(current_scn) from v$database;


CURRENT_SCN SCN_TO_TIMESTAMP(CURRENT_SCN)

----------- ---------------------------------------------------------------------------

     931892 28-JUL-12 09.49.40.000000000 AM



    SQL> delete from test.flashback_test;


    3 rows deleted.


    SQL> commit;


    Commit complete.


    SQL> select * from test.flashback_test;


    no rows selected



Restore of table flashback_test using flashback database:

=====================================================


 step 1:Shut down the instances of Database


SQL> shut immediate

Database closed.

Database dismounted.

ORACLE instance shut down


Step 2:Bring the Database in mount state

SQL> startup mount

ORACLE instance started.


Total System Global Area 1071333376 bytes

Fixed Size                  1341312 bytes

Variable Size             620759168 bytes

Database Buffers          444596224 bytes

Redo Buffers                4636672 bytes

Database mounted.


Step 3: Use the flashback database command to go back to the SCN at which the table still had its rows.


If you want to revert the database to SCN=930717, where the flashback_test table has 3 rows:


SQL> flashback database to scn 930717;


Flashback complete.


    SQL> alter database open resetlogs;

-- resets the log sequence number and the database will be synchronized.


Verify the Data:


    SQL> select * from flashback_test;


    COL1

    ———-

    1

    2

    3


Example 2: Flashback to timestamp


Let's say a table's data was lost on a particular day and the user knows the time when it happened; then we can use the below steps to recover that data using the flashback technique.


Flashback to timestamp:

======================


step 1:Shut down the instances of Database


SQL> shut immediate

Database closed.

Database dismounted.

ORACLE instance shut down


Step 2:Bring the Database in mount state

SQL> startup mount

ORACLE instance started.


Total System Global Area 1071333376 bytes

Fixed Size                  1341312 bytes

Variable Size             620759168 bytes

Database Buffers          444596224 bytes

Redo Buffers                4636672 bytes

Database mounted.



step 3: flashback to timestamp 28-JUL-12 09.49.40


SQL>flashback database to timestamp TO_TIMESTAMP('2012-07-28 09:49:40','YYYY-MM-DD HH24:MI:SS');


    Flashback complete.


    SQL> select * from test.flashback_test;


    COL1

    ———-

    1

    2

    3


Example 3: Enable recyclebin and get back the lost data.


Step 1:Enable recycle bin when the Database is in open state.


SQL>alter session set recyclebin=on;
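
Assuming the table was then dropped while the recyclebin was enabled, the dropped table can be seen in the recyclebin before we restore it (a simple sketch):

SQL> drop table test.flashback_test;

Table dropped.

SQL> select owner, original_name, object_name from dba_recyclebin where original_name='FLASHBACK_TEST';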


Step 2:Recover lost table using below command.

  

    SQL> FLASHBACK TABLE test.flashback_test TO BEFORE DROP;


    Flashback complete.


    SQL> select * from test.flashback_test;


    COL1

    ———-

    1

    2

    3


Example 4:Create guaranteed restore point and restore that restore point:

========



This shows how we can restore all the data lost over a particular period; it can also be used for data testing.

 This is a very vital feature of Oracle flashback.


Create Restore point:

=====================


Step 1:Create restore point 'test_rest1':


    SQL> create restore point test_rest1 guarantee flashback database;


    Restore point created.


    * To view the restore points:


    SQL> select NAME,SCN,TIME,GUARANTEE_FLASHBACK_DATABASE, STORAGE_SIZE

         from V$RESTORE_POINT

         where GUARANTEE_FLASHBACK_DATABASE='YES';





NAME          SCN     TIME                              GUA  STORAGE_SIZE
------------  ------  --------------------------------  ---  ------------
TEST_REST1    930537  28-JUL-12 08.57.51.000000000 AM   YES             0
TEST_REST2    930660  28-JUL-12 09.02.54.000000000 AM   YES      30203904
TEST1         932549  28-JUL-12 10.30.01.000000000 AM   YES       8192000


Restore Restore point:

=======================


step 1:shut the database


SQL> shut immediate

ORA-01031: insufficient privileges

SQL> conn /as sysdba

Connected.

SQL> shut immediate

Database closed.

Database dismounted.

ORACLE instance shut down


step 2:Startup in mount state


SQL> startup mount

ORACLE instance started.


Total System Global Area 1071333376 bytes

Fixed Size                  1341312 bytes

Variable Size             620759168 bytes

Database Buffers          444596224 bytes

Redo Buffers                4636672 bytes

Database mounted.



step 3:flashback Database to restore point 'test1':


SQL> FLASHBACK database TO RESTORE POINT test1;


Flashback complete


Note:To restore a table below command can be used.

FLASHBACK TABLE emp TO RESTORE POINT test1;



step 4:Open the Database with resetlogs


SQL> alter database open resetlogs;


Database altered


step 5: Verify the data


SQL> select *from test.flashback_test;


      COL1

----------

         1

         2

         3


Step 6: Monitor flashback using v$flashback_database_log


    SQL> select oldest_flashback_scn from v$flashback_database_log;


OLDEST_FLASHBACK_SCN

--------------------

              930314


Note: Dropping restore point.


SQL> DROP RESTORE POINT TEST_REST1;


Restore point dropped.


Note:

Possible flashback options available are:


SQL>flashback table test.flashback_test to timestamp (SYSDATE-1/24);

(Note: the table must have row movement enabled, e.g. ALTER TABLE test.flashback_test ENABLE ROW MOVEMENT, before FLASHBACK TABLE ... TO TIMESTAMP/SCN will work.)


-- be sure to substitute your SCN

FLASHBACK DATABASE TO SCN 19513917;

or

FLASHBACK DATABASE TO RESTORE POINT bef_damage;


/*

FLASHBACK DATABASE TO TIMESTAMP (SYSDATE-1/24);


FLASHBACK DATABASE TO TIMESTAMP timestamp'2002-11-05 14:00:00';


FLASHBACK DATABASE

TO TIMESTAMP to_timestamp('2002-11-11 16:00:00','YYYY-MM-DD HH24:MI:SS');

*/



Of course, we also have Flashback Query and Flashback Transaction Query, but the options above are the most useful.



Hope it helps....


Enjoy DBA tasks and practice...




Best regards,


Rafi.

Monday, December 26, 2011

Manually Installing Oracle Data Mining in Oracle 11g 

Hi,

If you compare Oracle 11g with previous versions, there are a lot of changes in terms of installing the Data Mining component.

For installing check the MY ORACLE SUPPORT note :

How To Manually Install Data Mining In Oracle 11g? [ID 818314.1].


Information about data mining:

The brief information about Oracle Data mining can be obtained from the below link:


http://docs.oracle.com/html/B14339_01/1intro.htm


In short, if you do the Oracle installation with the Enterprise Edition installation type, it selects the Oracle Data Mining option by default. In Oracle Database 11g, the Data Mining metadata is created along with the 'SYS' metadata when you select the Create Database option.


For verifying the installation of Oracle Data Mining:

Check if the parameter Data Mining is set to TRUE; if it is, then the Oracle Data Mining component is already installed in the Database. The V$OPTION view is very useful for checking whether a given feature exists in your Database, as seen from the results below:


SQL>SET LINESIZE 1000

SQL>SELECT * FROM V$OPTION;


PARAMETER VALUE

---------------------------------------------------------------- ----------------------------------------------------------------

Partitioning TRUE

Objects TRUE

Real Application Clusters FALSE

Advanced replication TRUE

Bit-mapped indexes TRUE

Connection multiplexing TRUE

Connection pooling TRUE

Database queuing TRUE

Incremental backup and recovery TRUE

Instead-of triggers TRUE

Parallel backup and recovery TRUE


PARAMETER VALUE

---------------------------------------------------------------- ----------------------------------------------------------------

Parallel execution TRUE

Parallel load TRUE

Point-in-time tablespace recovery TRUE

Fine-grained access control TRUE

Proxy authentication/authorization TRUE

Change Data Capture TRUE

Plan Stability TRUE

Online Index Build TRUE

Coalesce Index TRUE

Managed Standby TRUE

Materialized view rewrite TRUE


PARAMETER VALUE

---------------------------------------------------------------- ----------------------------------------------------------------

Materialized view warehouse refresh TRUE

Database resource manager TRUE

Spatial TRUE

Automatic Storage Management FALSE

Export transportable tablespaces TRUE

Transparent Application Failover TRUE

Fast-Start Fault Recovery TRUE

Sample Scan TRUE

Duplexed backups TRUE

Java TRUE

OLAP Window Functions TRUE


PARAMETER VALUE

---------------------------------------------------------------- ----------------------------------------------------------------

Block Media Recovery TRUE

Fine-grained Auditing TRUE

Application Role TRUE

Enterprise User Security TRUE

Oracle Data Guard TRUE

Oracle Label Security FALSE

OLAP TRUE

Basic Compression TRUE

Join index TRUE

Trial Recovery TRUE

Data Mining TRUE


PARAMETER VALUE

---------------------------------------------------------------- ----------------------------------------------------------------

Online Redefinition TRUE

Streams Capture TRUE

File Mapping TRUE

Block Change Tracking TRUE

Flashback Table TRUE

Flashback Database TRUE

Transparent Data Encryption TRUE

Backup Encryption TRUE

Unused Block Compression TRUE

Oracle Database Vault FALSE

Result Cache TRUE


PARAMETER VALUE

---------------------------------------------------------------- ----------------------------------------------------------------

SQL Plan Management TRUE

SecureFiles Encryption TRUE

Real Application Testing TRUE

Flashback Data Archive TRUE

DICOM TRUE

Active Data Guard TRUE

Server Flash Cache TRUE

Advanced Compression TRUE

XStream TRUE

Deferred Segment Creation TRUE



PRIVILEGE REQUIRED FOR DATA MINING SCHEMA:


Let us say we want to give the Data Mining feature to a specific user; then we have to grant the CREATE MINING MODEL privilege to that user, for example as shown below.
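
A minimal example, assuming the user is called dmuser:

SQL> grant create mining model to dmuser;

Grant succeeded.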


SQL> conn dmuser

Enter password:

Connected.

SQL> select * from session_privs;


PRIVILEGE

----------------------------------------

CREATE SESSION

UNLIMITED TABLESPACE

CREATE TABLE

CREATE CLUSTER

CREATE SEQUENCE

CREATE PROCEDURE

CREATE TRIGGER

CREATE TYPE

CREATE OPERATOR

CREATE INDEXTYPE

CREATE MINING MODEL


11 rows selected.




Happy Oracle DBA tasks...



Best regards,


Rafi.

Friday, November 25, 2011

How to tune oracle Database? 

Collecting and analyzing the AWR reports for Oracle Database:



Hi,

Tuning the Database is essential not only for better execution of SQL statements but also for the applications running on that Database. The AWR (Automatic Workload Repository) report gives us a clear picture to help decide how to tune the Database as well as individual SQL statements. With Enterprise Manager we can see this easily, but Enterprise Manager needs a license (additional cost), so some companies do not want to use it. Oracle gives us a flexible option by which we can generate the AWR report in HTML or plain-text format.

It is better to generate the report in HTML format so it can be viewed clearly in any web browser. Since performance tuning is a deep ocean, I will be updating this thread based on the problems I face and the methods that work well for tuning.



Collecting awr report from SQL prompt:


Login to the Database as the 'sys' user (SYSDBA), and make sure the Database is up and running and the Oracle environment for the particular Database

is set. We can gather the AWR report using 'awrrpt.sql'.


Note: If we want the AWR report in an Oracle RAC environment, then we have to use the 'awrgrpt.sql' script, as there we have to gather the report for multiple instances running on various nodes.
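
A snapshot can also be taken manually (for example just before and after a test run), so that the report covers exactly the interval we are interested in:

SQL> exec dbms_workload_repository.create_snapshot;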


SQL> select name from v$database;


NAME

---------

TESTDB


SQL> select status from v$instance;


STATUS

------------

OPEN


SQL> @?/rdbms/admin/awrrpt.sql


Current Instance

~~~~~~~~~~~~~~~~


DB Id DB Name Inst Num Instance

----------- ------------ -------- ------------

3628069655 TESTDB 1 TESTDB



Specify the Report Type

~~~~~~~~~~~~~~~~~~~~~~~

Would you like an HTML report, or a plain text report?

Enter 'html' for an HTML report, or 'text' for plain text

Defaults to 'html'

Enter value for report_type: html


Type Specified: html



Instances in this Workload Repository schema

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


DB Id Inst Num DB Name Instance Host

------------ -------- ------------ ------------ ------------

* 3628069655 1 TESTDB TESTDB TESTNODE1.comp.com




Using 3628069655 for database Id

Using 1 for instance number



Specify the number of days of snapshots to choose from

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Entering the number of days (n) will result in the most recent

(n) days of snapshots being listed. Pressing <return> without

specifying a number lists all completed snapshots.



Enter value for num_days: 1


Listing the last day's Completed Snapshots


Snap

Instance DB Name Snap Id Snap Started Level

------------ ------------ --------- ------------------ -----

TESTDB TESTDB 5590 24 Nov 2011 00:30 1

5591 24 Nov 2011 01:30 1

5592 24 Nov 2011 02:30 1

5593 24 Nov 2011 03:30 1

5594 24 Nov 2011 04:30 1

5595 24 Nov 2011 05:30 1

5596 24 Nov 2011 06:30 1

5597 24 Nov 2011 07:30 1

5598 24 Nov 2011 08:30 1

5599 24 Nov 2011 09:30 1

5600 24 Nov 2011 10:30 1

5601 24 Nov 2011 11:30 1

5602 24 Nov 2011 12:30 1

5603 24 Nov 2011 13:30 1

5604 24 Nov 2011 14:30 1

5605 24 Nov 2011 15:30 1



Specify the Begin and End Snapshot Ids

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Enter value for begin_snap: 5604

Begin Snapshot Id specified: 5604


Enter value for end_snap: 5605

End Snapshot Id specified: 5605



Specify the Report Name

~~~~~~~~~~~~~~~~~~~~~~~

The default report file name is awrrpt_1_5604_5605.html. To use this name,

press <return> to continue, otherwise enter an alternative.


Enter value for report_name: awrrpt_NOV24_2011_2_30_3_30_PM.html



SQL> exit

We will see the html format of the awr report in the current operating system path.


[oracle@TESTNODE1 ~]$ ls -altr awr*

-rw-r--r-- 1 oracle dba 458371 Nov 24 14:02 awrrpt_1_5590_5603.html

-rw-r--r-- 1 oracle dba 390564 Nov 24 16:31 awrrpt_NOV24_2011_2_30_3_30_PM.html


We can copy this html file using copying tool(winscp or ftp) to our machine and review using web browsers(mozilla or IE supported

versions).



Analyzing the AWR report and suggesting possible recommendations:

Once we obtain the AWR report, our main motive is to analyze it and come up with possible recommendations, depending

on the size of our Production Database. These recommendations should first be implemented in a

test environment and, after successful results, adopted in production environments.


1) Redo logs:


We need to make sure our redo logs are large enough. Check the number of log switches: one every twenty minutes is ideal; more than this is too high, and we should make the logs larger to reduce the number of switches.


We can find the log switches in the Instance Activity Stats part of the awr report.


Example:


Instance Activity Stats - Thread Activity


* Statistics identified by '(derived)' come from sources other than SYSSTAT


Statistic Total per Hour

log switches (derived) 2 2.00


We can see that in this system there are 2 log switches per hour, which is good. So this tells us the redo logs are large enough.
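
A simple way to count the switches per hour directly from the database (a small sketch using v$log_history):

SQL> select to_char(first_time,'YYYY-MM-DD HH24') hr, count(*) switches
     from v$log_history
     group by to_char(first_time,'YYYY-MM-DD HH24')
     order by 1;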


2)Parsing:


Check the hard parse rate. It should be close to zero. If it is not, this indicates that our SGA is probably too small; increase the size of the SGA and test again.

Hard parsing is caused by the use of literals in SQL (as opposed to bind variables). If the queries in question are our own, we should change them to use bind variables, as illustrated below.
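
A hypothetical illustration of the difference (the table and column names are made up):

-- Hard parse each time, because the literal changes on every execution:
SELECT * FROM orders WHERE customer_id = 1042;

-- Shareable cursor (soft parse), because a bind variable is used:
SELECT * FROM orders WHERE customer_id = :cust_id;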


We can find this information on the first page.



Load Profile Per Second Per Transaction Per Exec Per Call

~~~~~~~~~~~~ --------------- --------------- ---------- ----------

...

Parses: 33.9 7.2

Hard parses: 0.5 0.1

...


We can see in this system the hard parses is almost zero, which is good.


Now coming to the SGA we can focus on the below considerations:


3)Buffer hit and Library hit percentages:


Check the buffer hit and library hit percentages. We want them to be 100%, if not we should increase the size of SGA.This is also on the first page:


Instance Efficiency Percentages (Target 100%)

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Buffer Nowait %: 99.82 Redo NoWait %: 100.00

Buffer Hit %: 99.52 In-memory Sort %: 100.00

Library Hit %: 98.63 Soft Parse %: 98.60

Execute to Parse %: 50.96 Latch Hit %: 98.16

Parse CPU to Parse Elapsd %: 66.67 % Non-Parse CPU: 97.75


In this case they are also good.


4)Top 5 Timed Foreground Events:


Check the average wait times.Anything over 5ms indicates a problem.If we see database CPU events in the Top 5, this indicates that SGA is too small.We may also be missing indexes.Check the optimizer statistics.


Here are the Top 5 from my environment:


Top 5 Timed Foreground Events

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Avg

wait % DB

Event Waits Time(s) (ms) time Wait Class

------------------------------ ------------ ----------- ------ ------ ----------

DB CPU 15 59.9

log file sync 1,592 8 5 32.3 Commit

sort segment request 1 1 1001 4.0 Configurat

db file sequential read 216 1 4 3.6 User I/O

db file scattered read 64 0 6 1.5 User I/O


we can see here that the major issue is DB CPU, which generally indicates SGA is too small. However, in this case it is high because this report was run on a VM with the database and BPM sharing the CPU and disk.


Database file sequential/scattered read


These indicate time spent doing single-block (index) reads and multiblock (full table scan) reads, respectively. If these are high (over 5ms), we should consider moving our data files to reduce disk I/O contention, or moving them to faster disks.


5)Enqueue high watermark:


This indicates high-watermark (HW) enqueue contention, which occurs when there are multiple users inserting into LOB segments at once while the database is trying to reclaim unused space.

We should consider enabling SecureFiles to improve LOB performance (DB_SECUREFILE=ALWAYS).


We cannot see this in the example report, because it was not a problem in my environment, so it did not make it into the Top 5. If it did, we would see an event called: enq: HW - contention


Other things to be aware of…


We will also check our database configuration.


6)MEMORY_TARGET:


Do not use this setting. We should tune the memory manually instead; this will result in a better-tuned database. We should start with 60% of physical memory allocated to the SGA and 20% to the PGA, as sketched below.
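
A minimal sketch of such manual settings, assuming a server with 16 GB of physical memory (the values are purely illustrative):

SQL> alter system set memory_target=0 scope=spfile;
SQL> alter system set sga_target=9600M scope=spfile;
SQL> alter system set pga_aggregate_target=3200M scope=spfile;
-- restart the instance for the new values to take effect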



7)AUDIT_TRAIL:


Usually we do not touch this setting for tuning, but auditing at the Database level

can add overhead to the Database.





Happy Tuning Oracle Databases ...



Best regards,


Rafi.

Tuesday, October 18, 2011

HOW TO RESOLVE ROW-CHAINING 

We know that row chaining happens when a row is too large to fit in a single block.


Eg: If we have a 2k blocksize but our row size is 4k, we will use 3 blocks to store that row in pieces. 


Row-chaining can occur because of the below reasons:


1)A table has LONG/LONG RAW data types in some cases

2)A table's row size exceeds the block size

3)A table has more than 255 columns


We can resolve ROW-CHAINING by following below 2 steps:


Step 1: FIND OUT IF ROW-CHAINING IS OCCURRING.

$sqlplus scott/tiger

SQL>exec dbms_stats.gather_schema_stats('SCOTT')

OR

SQL>exec dbms_stats.gather_table_stats('SCOTT','EMP')

SQL>select chain_cnt from user_tables where TABLE_NAME='EMP';



NOTE: Any non-zero value means there is row-chaining.
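
To see chaining across the whole schema at once, a quick sketch:

SQL>select table_name, chain_cnt, num_rows from user_tables where chain_cnt > 0;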


Step 2:RESOLVE THE CHAINING


 RUN THE FOLLOWING COMMAND TO CREATE chained_rows table.


$sqlplus "/as sysdba"


SQL>@$ORACLE_HOME/rdbms/admin/utlchain.sql


SQL>ANALYZE TABLE EMP LIST CHAINED ROWS INTO CHAINED_ROWS;


SQL>create table temp_emp as

select * from emp

where rowid in (select HEAD_ROWID from chained_rows

where TABLE_NAME='EMP');


SQL>DELETE FROM EMP WHERE ROWID IN(

select HEAD_ROWID from chained_rows

where TABLE_NAME='EMP');


SQL>INSERT INTO EMP SELECT * FROM TEMP_EMP;


SQL>DROP TABLE TEMP_EMP;

SQL>DROP TABLE CHAINED_ROWS;
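
Note: As an alternative to the copy/delete/insert approach above, the table can also be rebuilt in place with ALTER TABLE ... MOVE, after which its indexes must be rebuilt. A sketch (PK_EMP is the index name in the standard SCOTT schema):

SQL>ALTER TABLE EMP MOVE;

SQL>ALTER INDEX PK_EMP REBUILD;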


Note: Remember, row migration is typically caused by UPDATE operations (a row grows and no longer fits in its block), whereas row chaining itself can also occur on INSERT when a row is simply too large for one block.


Hope it helps...


Happy Oracle DBA learning


Best regards,


Rafi.

Friday, October 14, 2011

Refreshing schemas in oracle Databases 

Hi,

Schema refresh tasks are regular work for DBAs working on Database migration projects. A schema refresh is done to keep our development, test and performance environments in sync with production Database data.


Below I'm describing one such task. A lot of times we might need to refresh a set of schemas, so it is very important to make a document or plan for doing this task effectively. In the below task we have 2 environments: PRODDB (production) and TESTDB (test). I'm refreshing TESTDB by taking data from PRODDB; here only one schema is refreshed.


Source side:



Preparatory Steps:


Create a directory, or use an existing directory, and give the 'system' Database user read and write permission on that directory (TEST_MIG), for example as shown below.
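
A sketch of creating such a directory (the filesystem path is only an example):

SQL> create or replace directory TEST_MIG as '/u03/TEST_MIG';

Directory created.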


SQL> grant read,write on directory TEST_MIG to system;


Grant succeeded.


SQL> alter user system identified by TESTDBdba account unlock;


PRODDB:


Step 1:Exporting the Data from the source Database(PRODDB in our case)


vi expdp_refresh_schema_sep27.sh


$ expdp system/PRODDB@PRODDB DUMPFILE=REFRESH_SCHEMA.DMP DIRECTORY=DATA_PUMP_DIR SCHEMAS=REFRESH_SCHEMA LOGFILE=REFRESH_SCHEMA.log


$ nohup sh expdp_refresh_schema_sep27.sh>refresh_schema.out &


Step 2:Copying the dump file(Source Data) to Target Database server


We can use 'winscp' tool(A graphical utility for copying files from windows to linux or viceversa) or ftp or scp or tar or rsync for coping Data from source server to target server.
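
For example, using scp (the host name and paths below are illustrative only):

$ scp /u01/app/oracle/admin/PRODDB/dpdump/REFRESH_SCHEMA.DMP oracle@testdb-host:/u03/TEST_MIG/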


Step 3:Moving Data into the target Database.



$ impdp system/TESTDBdba@TESTDB DUMPFILE=REFRESH_SCHEMA.DMP DIRECTORY=TEST_MIG REMAP_SCHEMA=REFRESH_SCHEMA:REFRESH_SCHEMA LOGFILE=REFRESH_SCHEMA.log



Step 4:Verify the Data in Source and Target Databases.


Note:

In Oracle 11g Release 2, version 11.2.0.1.0, there are about 44 distinct object types; compared to previous versions this number is huge.


SQL> select * from v$version;


BANNER

--------------------------------------------------------------------------------

Oracle Database 11g Enterprise Edition Release 11.2.0.1.0 - 64bit Production

PL/SQL Release 11.2.0.1.0 - Production

CORE 11.2.0.1.0 Production

TNS for Linux: Version 11.2.0.1.0 - Production

NLSRTL Version 11.2.0.1.0 - Production



SQL> select distinct object_type from dba_objects;


OBJECT_TYPE

-------------------

EDITION

INDEX PARTITION

CONSUMER GROUP

SEQUENCE

TABLE PARTITION

SCHEDULE

QUEUE

RULE

JAVA DATA

PROCEDURE

OPERATOR


OBJECT_TYPE

-------------------

LOB PARTITION

DESTINATION

WINDOW

SCHEDULER GROUP

DATABASE LINK

LOB

PACKAGE

PACKAGE BODY

LIBRARY

PROGRAM

RULE SET


OBJECT_TYPE

-------------------

CONTEXT

TYPE BODY

JAVA RESOURCE

XML SCHEMA

TRIGGER

JOB CLASS

UNDEFINED

DIRECTORY

MATERIALIZED VIEW

TABLE

INDEX


OBJECT_TYPE

-------------------

SYNONYM

VIEW

FUNCTION

JAVA CLASS

JAVA SOURCE

INDEXTYPE

CLUSTER

TYPE

RESOURCE PLAN

JOB

EVALUATION CONTEXT


44 rows selected.


Source Database:



PRODDB:

---------


SQL> select count(*) from dba_objects

where owner='REFRESH_SCHEMA';


COUNT(*)

----------

132


SQL> select count(*) from dba_tables

where owner='REFRESH_SCHEMA';


COUNT(*)

----------

34


SQL> SELECT COUNT(*) FROM DBA_OBJECTS

WHERE OWNER='REFRESH_SCHEMA'

AND OBJECT_TYPE IN('TABLE','JOB','VIEW','PACKAGE','TRIGGER','SYNONYM','FUNCTION','PROCEDURE','TYPE')

ORDER BY OBJECT_TYPE;

COUNT(*)

----------

62






TARGET DATABASE:



TESTDB:

-------------

SQL> select count(*) from dba_objects

where owner='REFRESH_SCHEMA';


COUNT(*)

----------

131


SQL> select count(*) from dba_tables

where owner='REFRESH_SCHEMA';


COUNT(*)

----------

34


SQL> SELECT COUNT(*) FROM DBA_OBJECTS

WHERE OWNER='REFRESH_SCHEMA'

AND OBJECT_TYPE IN('TABLE','JOB','VIEW','PACKAGE','TRIGGER','SYNONYM','FUNCTION','PROCEDURE','TYPE')

ORDER BY OBJECT_TYPE;


COUNT(*)

----------

62


Hope it helps. Happy refreshing of our Database environments!

Starting and Stopping Oracle Application server 10g sequence 

Hi All,

The Oracle Application Server 10g (OAS 10g) starting and stopping sequence is very important when you have to bring all the services up and running. If the sequence changes, you might face issues where some services are down and cannot be brought back up. I faced this while working with Oracle 10g Application Server, where a weekly maintenance activity resulted in some applications being down, so the correct order for starting and stopping the Oracle 10g Application Server services is as follows:


Starting Oracle 10g Application server:


1)Start the listener.

Start the listener from the server where Your Application services are running

$lsnrctl start


2)Start the Database.

Set the ORACLE_SID of the Database and start it from SQL*Plus:

On Windows server:

C:\Users\user>set ORACLE_SID=DB_NAME

SQL>startup


On Unix based server:

$export ORACLE_SID=DB_NAME

SQL>startup


3)Start Infrastructure services:

Make sure you are in Infra Home=>opmn=>bin directory.

$cd Oracle10g/Infra/opmn/bin

$opmnctl startall


4)Start Middle-tier services:

Make sure you are in Middle-tier Home=>opmn=>bin directory.

$cd Oracle10g/MidTier/opmn/bin

$opmnctl startall


Note 1:opmn stands for oracle process manager and notification server.

opmnctl startall:Start all application services like HTTP,OC4J,Wireless.....

opmnctl stopall: Stop all application services like HTTP,OC4J,Wireless.....


Note 2:Individual service or component can also be started/stopped with the help of opmnctl as follows:

$opmnctl startproc process-type=OC4J Home

$opmnctl startproc component-type=OC4J 

$opmnctl stopproc process-type=OC4J Home

$opmnctl stopproc component-type=OC4J

$opmnctl startproc ias-component=wireless 

$opmnctl stopproc ias-component=wireless

$opmnctl stopproc process-type=OC4J_Wireless

$opmnctl startproc process-type=OC4J_Wireless
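
After starting (or before stopping) the services, we can verify the status of all components with:

$opmnctl status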


Stopping Oracle 10g Application server:


When you are stopping the services this order becomes reverse,so you have to follow the below steps:


1)Stop Middle-tier services:

Make sure you are in Middle-tier Home=>opmn=>bin directory.

$cd Oracle10g/MidTier/opmn/bin

$opmnctl stopall


2)Stop Infrastructure services:

Make sure you are in Infra Home=>opmn=>bin directory.

$cd Oracle10g/Infra/opmn/bin

$opmnctl stopall


3)Stop Database:

On Windows server:

C:\Users\user>set ORACLE_SID=DB_NAME

SQL>shutdown immediate


On Unix based server:

export ORACLE_SID=DB_NAME

SQL>shutdown immediate


4)Stop the listener:

Stop the listener from the server where Your Application services are running

$lsnrctl stop


Hope it helps.


Tuesday, August 19, 2014

Health check script for Oracle EBS Database and Application 11i/R12 

Hi Apps DBA,


A lot of times we need to check the health of the Oracle EBS Database and Application. The below script

can be very handy for it.


SQL>conn apps/***

SQL>spool health_check_apps_db.out 


set pages 1000

set linesize 135

col PROPERTY_NAME for a25

col PROPERTY_VALUE for a15

col DESCRIPTION for a35

col DIRECTORY_PATH for a70

col directory_name for a25

col OWNER for a10

col DB_LINK for a40

col HOST for a20

col "User_Concurrent_Queue_Name" format a50 heading 'Manager'

col "Running_Processes" for 9999 heading 'Running'

set head off

set feedback off

set echo off


break on utl_file_dir


select '--------------------------------------------------------------------------------' from dual;

select '-----------------------     Database Checks    ---------------------------------' from dual;

select '--------------------------------------------------------------------------------' from dual;

Prompt

select '************************ Getting Database Information  *************' from dual ;


select 'Database Name..................... : '||name from v$database;

select 'Database Status................... : '||open_mode from v$database;

select 'Archiving Status.................. : '||log_mode  from v$database;

select 'Global Name....................... : '||global_name from global_name;

select 'Creation Date..................... : '||to_char(created,'DD-MON-YYYY HH24:MI:SS') from v$database;

select 'Checking For Missing File......... : '||count(*) from v$recover_file;

select 'Checking Missing File Name ....... : '||count(*) from v$datafile where name like '%MISS%';

select 'Total SGA ........................ : '||round(sum(value)/(1024*1024))||' MB' from v$sga ;

select 'Database Version.................. : '||version from v$instance;

select 'Temporary Tablespace.............. : '||property_value from database_properties

                                                where property_name like 'default_temp_tablespace';

select 'Apps Temp Tablespace.............. : '||temporary_tablespace from dba_users where username like '%APPS%';

select 'Temp Tablespace size.............. : '||sum(maxbytes/1024/1024/1024)||' GB' from dba_temp_files group by tablespace_name;

select 'No of Invalid Object ............. : '||count(*) from dba_objects where status = 'INVALID' ;

select 'service Name...................... : '||value from v$parameter2 where name='service_names';

select 'plsql code type................... : '||value from v$parameter2 where name='plsql_code_type';

select 'plsql subdir count................ : '||value from v$parameter2 where name='plsql_native_library_subdir_count';

select 'plsql native library dir.......... : '||value from v$parameter2 where name='plsql_native_library_dir';

select 'Shared Pool Size.........,........ : '||(value/1024/1024) ||' MB' from v$parameter where name='shared_pool_size';

select 'Log Buffer........................ : '||(value/1024/1024) ||' MB' from v$parameter where name='log_buffer';

select 'Buffer Cache...................... : '||(value/1024/1024) ||' MB' from v$parameter where name='db_cache_size';

select 'Large Pool Size................... : '||(value/1024/1024) ||' MB' from v$parameter where name='large_pool_size';

select 'Java Pool Size.................... : '||(value/1024/1024) ||' MB' from v$parameter where name='java_pool_size';

select 'utl_file_dir...................... : '||value from v$parameter2 where name='utl_file_dir';

select directory_name||'.................... : '||directory_path from all_directories where rownum  < 15 ;


select '************************ Getting Apps Information *****************' from dual ;


select 'Home URL.......................... : '||home_url from apps.icx_parameters ;

select 'Session Cookie.................... : '||session_cookie from apps.icx_parameters ;

select 'Application Database ID........... : '||fnd_profile.value('apps_database_id') from dual;

select 'GSM Enabled....................... : '||fnd_profile.value('conc_gsm_enabled') from dual;

select 'Maintenance Mode.................. : '||fnd_profile.value('apps_maintenance_mode') from dual;

select 'Site Name......................... : '||fnd_profile.value('Sitename')from dual;

select 'Bug Number........................ : '||bug_number from ad_bugs where bug_number='2728236';


select '************************ Doing Workflow Checks ********************' from dual ;


select 'No Open Notifications............. : '||count(*) from wf_notifications where mail_status in('MAIL','INVALID','OPEN');

select 'Name(wf_systems).................. : '||name from wf_systems;

select 'Display Name(wf_systems).......... : '||display_name from wf_systems;

select 'Address........................... : '||address from wf_agents;

select 'Workflow Mailer Status............ : '||component_status from applsys.fnd_svc_components

                                                where component_name like 'Workflow Notification Mailer';

select 'Test Address...................... : '||b.parameter_value

                                                from fnd_svc_comp_param_vals_v a, fnd_svc_comp_param_vals b

                                                where a.parameter_id=b.parameter_id

                                                and a.parameter_name in ('TEST_ADDRESS');

select 'From Address...................... : '||b.parameter_value

                                                from fnd_svc_comp_param_vals_v a, fnd_svc_comp_param_vals b

                                                where a.parameter_id=b.parameter_id

                                                and a.parameter_name in ('FROM');

select 'WF Admin Role..................... : '||text from wf_resources where name = 'WF_ADMIN_ROLE' and  rownum =1;



Prompt

Prompt Getting Apps Node Info

Prompt ************************

select Node_Name,'........................ : '||server_id from fnd_nodes;

select server_type||'......................: '||name from fnd_app_servers, fnd_nodes

                                                where fnd_app_servers.node_id =fnd_nodes.node_id;


select '************************ Doing Conc Mgr Checks  ********************' from dual ;


Prompt Getting Con Mgr Status

Prompt ************************

Prompt

Prompt Manager Name                                                 Hostname          No of Proc Running

Prompt ~~~~~~~~~~~~                                                 ~~~~~~~~          ~~~~~~~~~~~~~~~~~~

set lines 145

Column Target_Node   Format A12

select User_Concurrent_Queue_Name,'....... : '||Target_Node||' ...... : '||Running_Processes

                                                from fnd_concurrent_queues_vl

                                                where Running_Processes = Max_Processes

                                                and Running_Processes > 0;


Prompt

Prompt Getting Pending Request

Prompt ***********************

--select user_concurrent_program_name||'........ : '||request_id

--                                                  from fnd_concurrent_requests r, fnd_concurrent_programs_vl p, fnd_lookups s, fnd_lookups ph

--                                                  where r.concurrent_program_id = p.concurrent_program_id

--                                                 and r.phase_code = ph.lookup_code

--                                                and ph.lookup_type = 'CP_PHASE_CODE'

--                                               and r.status_code = s.lookup_code

--                                                  and s.lookup_type = 'CP_STATUS_CODE'

--                                                  and ph.meaning ='Pending'

--                                                  and rownum < 10

--                                                  order by to_date(actual_start_date, 'dd-MON-yy hh24:mi');

--


Prompt

Prompt Getting Workflow Components Status

Prompt **********************************


set pagesize 1000

set linesize 125

col COMPONENT_STATUS for a20

col COMPONENT_NAME for a45

col STARTUP_MODE for a12


select fsc.COMPONENT_NAME,

fsc.STARTUP_MODE,

fsc.COMPONENT_STATUS,

fcq.MAX_PROCESSES TARGET,

fcq.RUNNING_PROCESSES ACTUAL

from APPS.FND_CONCURRENT_QUEUES_VL fcq, APPS.FND_CP_SERVICES fcs,

APPS.FND_CONCURRENT_PROCESSES fcp, fnd_svc_components fsc

where fcq.MANAGER_TYPE = fcs.SERVICE_ID

and fcs.SERVICE_HANDLE = 'FNDCPGSC'

and fsc.concurrent_queue_id = fcq.concurrent_queue_id(+)

and fcq.concurrent_queue_id = fcp.concurrent_queue_id(+)

and fcq.application_id = fcp.queue_application_id(+)

and fcp.process_status_code(+) = 'A'

order by fcp.OS_PROCESS_ID, fsc.STARTUP_MODE;


select '--------------------------------------------------------------------------------' from dual;

select '-----------------------     End Of Database Checks  ----------------------------' from dual;

select '--------------------------------------------------------------------------------' from dual;


SQL>spool off



Enjoy Oracle Apps DBA tasks..



How FNDLOAD Utility is useful for Oracle Apps DBA 

Hi,

The FNDLOAD (Generic Loader) utility is very useful for an Apps DBA. Let us try to understand how it works and how we can utilize it well.


Understanding FNDLOAD utility: 

The Generic Loader (FNDLOAD) is a concurrent program that can download data from an application entity into a portable, editable text file. This file can then be uploaded into any other database to copy the data.

Data structures supported by the Loader include master-detail relationships and foreign key relationships.

FNDLOAD uses configuration (.lct) scripts to ensure consistent migration of objects within Oracle Applications.


FNDLOAD utility Modes of Operation:


The FNDLOAD(Generic Loader) utility operates in 2 modes:

1)Download mode or

2)Upload mode

In download mode, data is downloaded from a database according to a configuration (.lct) file and converted into a data (.ldt) file. This data file can be uploaded to a different database. In both downloading and uploading, the structure of the data involved is described by the configuration file.

The configuration file describes the structure of the data and also the access methods use to copy the data into or out of the database. 

The same configuration file may be used for both uploading and downloading.

When downloading,the Generic Loader creates a second file, called the data file that contains the structured data selected for downloading. 

The data file has a standard syntax for representing the data that has been downloaded.


When uploading,the Generic Loader reads a data file to get the data that it is to upload.In most cases, the data file was produced by a previous download, but may have come from another source. 

The data file cannot be interpreted without the corresponding configuration file available.



FNDLOAD utility syntax:


DOWNLOAD COMMAND SYNTAX:


FNDLOAD <apps_user>/<apps_pwd> 0 Y DOWNLOAD ${FND_TOP}/patch/115/import/<config_file>.lct <data_file>.ldt <entity> [param="value" ...]


UPLOAD COMMAND SYNTAX:


FNDLOAD <apps_user>/<apps_pwd> 0 Y UPLOAD ${FND_TOP}/patch/115/import/<config_file>.lct <data_file>.ldt


FNDLOAD Usage Details:


FNDLOAD can be used to migrate the following system administrator objects between instances

1.Printer Styles

2.Lookup Types and codes

3.Descriptive Flexfield (DFF)

4.Key Flexfield (KFF)

5.Concurrent programs with the parameters

6.Request Sets (when the programs are not triggered based on success)

7.Value Sets and Value set Values

8.Profiles

9.Request Groups

10.Responsibilities

11.Forms

12.Functions

13.Menus

14.Messages


Merits of FNDLOAD utility:


1.With cloning, a baseline environment (the source for the clone) has to be maintained and updated on a regular basis.

2.If the base environment has issues, the cloning strategy becomes totally ineffective; every new environment created will then have to be updated with a lot of changes.

3.Cloning/refresh is not possible at short intervals.

4.Selective replication of setups and AOL objects is not possible with cloning.

5.Environments are often not delivered as per the timeline, affecting the testing schedules.

6.Manually maintaining environments at different levels of configuration is tedious.

7.Manually updating multiple environments with defect fixes is time consuming and error prone.

8.FNDLOAD is fully supported and recommended by Oracle for migration of FND objects. It requires zero learning curve and zero investment.


Demerits of FNDLOAD utility:



1.This utility can be used only for FND (System Administrator) objects.

2.Application Patching mechanisms use FNDLOAD heavily. There is a possibility of negative impact.

3.There is no validation of sensitive data that is being migrated by the FNDLOAD tool itself.


Examples of FNDLOAD utility:


Download: FNDLOAD apps/$pwd 0 Y DOWNLOAD $FND_TOP/patch/115/import/aflvmlu.lct lookup_techops_aris.ldt FND_LOOKUP_TYPE APPLICATION_SHORT_NAME="CN" LOOKUP_TYPE="XXTEST_TECHOPS_ARIS_SITES"


Download: FNDLOAD apps/$pwd 0 Y DOWNLOAD $FND_TOP/patch/115/import/aflvmlu.lct lookup_techops_points.ldt FND_LOOKUP_TYPE APPLICATION_SHORT_NAME="CN" LOOKUP_TYPE="XXTEST_TECHOPS_POINTS_SITES"


Download: FNDLOAD apps/apps 0 Y DOWNLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_XXTEST_techops_points_procedure.ldt PROGRAM APPLICATION_SHORT_NAME="XXTEST" CONCURRENT_PROGRAM_NAME="XXTEST_TECHOPS_POINTS_PRCS_EAST"


Download: FNDLOAD apps/apps 0 Y DOWNLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_XXTEST_techops_points_load.ldt PROGRAM APPLICATION_SHORT_NAME="XXTEST" CONCURRENT_PROGRAM_NAME="XXTEST_TECHOPS_POINTS_EAST_LOAD"


Download: FNDLOAD apps/apps 0 Y DOWNLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_XXTEST_techops_points_load.ldt PROGRAM APPLICATION_SHORT_NAME="XXTEST" CONCURRENT_PROGRAM_NAME="XXTEST_TECHOPS_POINTS_MW_LOAD"


Download: FNDLOAD apps/apps 0 Y DOWNLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_XXTEST_techops_mttr_load.ldt PROGRAM APPLICATION_SHORT_NAME="XXTEST" CONCURRENT_PROGRAM_NAME="XXTEST_TECHOPS_MTTR_LOAD"


Download: FNDLOAD apps/apps 0 Y DOWNLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_XXTEST_techops_mttr_process.ldt PROGRAM APPLICATION_SHORT_NAME="XXTEST" CONCURRENT_PROGRAM_NAME="XXTEST_TECHOPS_MTTR_DATA_PROCES"



Upload: FNDLOAD apps/$pwd 0 Y UPLOAD $FND_TOP/patch/115/import/aflvmlu.lct lookup_techops_aris.ldt


Upload: FNDLOAD apps/$pwd 0 Y UPLOAD $FND_TOP/patch/115/import/aflvmlu.lct lookup_techops_points.ldt


Upload: FNDLOAD apps/apps 0 Y UPLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_xxtest_techops_points_procedure.ldt


Upload: FNDLOAD apps/apps 0 Y UPLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_xxtest_techops_points_load.ldt


Upload: FNDLOAD apps/apps 0 Y UPLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_xxtest_techops_points_load.ldt


Upload: FNDLOAD apps/apps 0 Y UPLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_xxtest_techops_mttr_load.ldt


Upload: FNDLOAD apps/apps 0 Y UPLOAD $FND_TOP/patch/115/import/afcpprog.lct concprg_xxtest_techops_mttr_process.ldt
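
The same pattern applies to the other object types listed above. For example, for a profile option (a hedged sketch: the profile name XXTEST_PROFILE and the application short name XXTEST are placeholders):

Download: FNDLOAD apps/$pwd 0 Y DOWNLOAD $FND_TOP/patch/115/import/afscprof.lct XXTEST_PROFILE.ldt PROFILE PROFILE_NAME="XXTEST_PROFILE" APPLICATION_SHORT_NAME="XXTEST"

Upload: FNDLOAD apps/$pwd 0 Y UPLOAD $FND_TOP/patch/115/import/afscprof.lct XXTEST_PROFILE.ldt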



Enjoy and Happy Oracle Apps DBA learning.


Hope it helps...



Best regards,


Rafi. 

Posted by Rafi at 11:12 PM


Labels: Apps DBA tasks, Apps R12 Concepts 


Monday, March 14, 2011

R12 Applications File System 

 

 

 


Hi,

A few days back I posted about the 3 tiers: 1) Desktop 2) Application 3) Database.

I'm continuing with the R12 Applications file system in this post.



II)Applications File System– Overview


The Top-Level R12-Applications Directory Structure is divided into 3 parts:

1)Database Server files (db)

2)Instance specific files (inst)

3)Application tier Server files (apps)



1)Database Server files(db):

• The db/apps_st/data directory is located on the database node machine, and contains the system tablespaces, redo log files, data tablespaces, index tablespaces, and database files.

• The db/tech_st/10.2.0 directory is located on the database node machine, and contains the ORACLE_HOME for the Oracle10g database.

• The apps/apps_st/appl (APPL_TOP) directory contains the product directories and files for Oracle Applications.

• The apps/apps_st/comn (COMMON_TOP) directory contains Java classes, HTML pages, and other files and directories used by multiple products.

• The apps/tech_st/10.1.2 directory contains the ORACLE_HOME used for the Applications technology stack tools components.

• The apps/tech_st/10.1.3 directory contains the ORACLE_HOME used for the Applications technology stack Java components.


2)Instance specific files (inst):

Instance Home (INST_TOP):


*Oracle Applications Release 12 introduces the concept of a top-level directory for an Applications instance. This directory is referred to as the Instance Home, and is denoted by the environment variable $INST_TOP.

*Using an Instance Home provides the ability to share Applications and technology stack code among multiple instances, for example a development instance and a test instance.

*Support for read-only file systems and centralization of log files.


Notable features of this architecture include:


• The latest version of Oracle Containers for Java (OC4J), the successor to JServ, is included in Oracle Application Server 10.1.3.

• All major services are started out of the OracleAS 10.1.3 ORACLE_HOME.

• The Applications modules (packaged in the file formsapp.ear) are deployed into the OC4J-Forms instance running out of the OracleAS 10.1.3 ORACLE_HOME, while the frmweb executable is invoked out of the OracleAS 10.1.2 ORACLE_HOME.


3)Application tier Server files(apps):

ORACLE_HOMEs:

There are 3 ORACLE_HOMEs in the Architecture of R12:

One Oracle Database 10g Release 2 ORACLE_HOME (the Oracle 10.2.0 home) and

two Oracle Application Server (OracleAS) ORACLE_HOMEs.


*Use of Two Oracle Application Server ORACLE_HOMEs in Release 12

Two different Oracle Application Server (OracleAS) 10g releases, in separate ORACLE_HOMEs, are used in Oracle Applications Release 12. 

*This enables Oracle Applications to take advantage of the latest Oracle technologies.

• The Oracle Application Server 10.1.3 ORACLE_HOME (sometimes referred to as the Web or Java ORACLE_HOME) replaces the 8.1.7-based ORACLE_HOME provided by Oracle9i Application Server 1.0.2.2.2 in Release 11i.



Hope it helps in understanding Oracle R12 Application filesystem.



Best regards,


Rafi. 

Posted by Rafi at 11:37 PM


Labels: Apps R12 Concepts 


Tuesday, February 22, 2011

Oracle R12 Application Architecture 

 

 


Hi,

We can understand Oracle R12 Application by dividing it into 3 major components:

I)Oracle Applications Architecture

II)Applications File System– Overview

III)Applications Database Organization


I)Oracle Applications Architecture:


Let us see the structure in this post.

Oracle Applications can be divided into:


1)Desktop Tier

2)The Application Tier

3)The Database Tier


1)Desktop Tier:


*The client interface is provided through HTML for HTML-based applications, and via a Java applet in a Web browser for the traditional Forms-based applications.

*In Oracle Applications Release 12, each user logs in to Oracle Applications through the E-Business Suite Home Page on a desktop client web browser as shown in fig.1.

*The E-Business Suite Home Page, as shown in fig.1, provides a single point of access to HTML-based applications, Forms-based applications, and Business Intelligence applications.


2)The Application Tier:


*The application tier has a dual role: hosting the various servers and service groups that process the business logic, and managing communication between the desktop tier and the database tier. This tier is sometimes referred to as the middle tier.

*3 servers or service groups comprise the basic application tier for Oracle Applications:


1)Web services:The Web services component of Oracle Application Server processes requests received over the network from the desktop clients.


2)Forms services: Forms services in Oracle Applications Release 12 are provided by the Forms listener servlet or Forms socket mode; the servlet deployment facilitates the use of firewalls, load balancing, proxies and other networking options.


3)Concurrent services: Processes that run on the Concurrent Processing server are called concurrent requests. When a request is submitted, it is recorded in a database table; a concurrent manager then reads the applicable requests from the table and starts the associated concurrent program.
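
As a quick illustrative check (a minimal sketch; run as the APPS user), the defined concurrent managers and the number of processes they are actually running can be listed from FND_CONCURRENT_QUEUES:

-- list concurrent managers with their target and actual process counts
SELECT concurrent_queue_name, max_processes, running_processes
FROM apps.fnd_concurrent_queues
ORDER BY concurrent_queue_name;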


3)The Database Tier:


*The database tier contains the Oracle database server, which stores all the data maintained by Oracle Applications. The database also stores the Oracle Applications online help information.

*More specifically, the database tier contains the Oracle data server files and Oracle Applications database executables that physically store the tables, indexes, and other database objects for your system.

* The database server does not communicate directly with the desktop clients, but rather with the servers on the application tier, which mediate the communications between the database server and the clients.


Hope it helps at the initial level of R12 Application understanding.



Best regards,


Saturday, August 29, 2015

Difference in location of Log files in Oracle EBS Release 12.1.3 and Oracle EBS Release 12.2.4 

Hi All,


A lot of companies are planning to upgrade from Oracle EBS Release 12.1.3 (Rel 12.1.x) to Oracle EBS Release 12.2.4 (Rel 12.2.x). A few companies have already upgraded.


The log file locations in Oracle EBS Release 12.1.3 and Oracle EBS Release 12.2.4 are given below:


1.Instance startup and configuration log files (under INST_TOP) in Oracle Release 12.1.3 are as below:


$INST_TOP/logs/appl/admin/log

Startup/Shutdown error message related to tech stack (10.1.2, 10.1.3 forms/reports/web)

$INST_TOP/logs/ora/ (10.1.2 & 10.1.3)

$INST_TOP/logs/ora/10.1.3/Apache/error_log[timestamp](Apache log files)

$INST_TOP/logs/ora/10.1.3/opmn/ (OC4J, oa*, opmn.log)

$INST_TOP/logs/ora/10.1.2/network/ (listener log)

$INST_TOP/apps/$CONTEXT_NAME/logs/appl/conc/log (CM log files)


2. Log files related to cloning in R12.1.3 are as below:


 Preclone log files in source instance

Database Tier – $ORACLE_HOME/appsutil/log/$CONTEXT_NAME/(StageDBTier_MMDDHHMM.log)

Application Tier –

$INST_TOP/apps/$CONTEXT_NAME/admin/log/(StageAppsTier_MMDDHHMM.log)


Clone log files in target instance

Database Tier – $ORACLE_HOME/appsutil/log/$CONTEXT_NAME/ApplyDBTier_.log

Apps Tier – $INST_TOP/admin/log/ApplyAppsTier_.log


3. Patching related log files in R12.1.3 are as below:


i) Application Tier adpatch log – $APPL_TOP/admin/$SID/log/

ii) Developer (Developer/Forms & Reports 10.1.2) Patch – $ORACLE_HOME/.patch_storage

iii) Web Server (Apache) patch – $IAS_ORACLE_HOME/.patch_storage

iv) Database Tier opatch log – $ORACLE_HOME/.patch_storage



4. Autoconfig related log files in R12.1.3 are as below:


a) Database Tier Autoconfig log :

$ORACLE_HOME/appsutil/log/$CONTEXT_NAME/MMDDHHMM/adconfig.log

$ORACLE_HOME/appsutil/log/$CONTEXT_NAME/MMDDHHMM/NetServiceHandler.log



b) Application Tier Autoconfig log : 

$INST_TOP/apps/$CONTEXT_NAME/admin/log/$MMDDHHMM/adconfig.log


5.Autoconfig context file location in R12.1.3 :

$INST_TOP/apps/$CONTEXT_NAME/appl/admin/$CONTEXT_NAME.xml



6)Installation logs in R12.1.3 are as below:


 Database Tier Installation

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME/.log

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME/ApplyDBTechStack_.log

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME/ohclone.log

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME/make_.log

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME/installdbf.log

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME/adcrdb_.log RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME/ApplyDatabase_.log

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME//adconfig.log

RDBMS_ORACLE_HOME/appsutil/log/$CONTEXT_NAME//NetServiceHandler.log

Application Tier Installation

$INST_TOP/logs/.log

$APPL_TOP/admin/$CONTEXT_NAME/log/ApplyAppsTechStack.log

$INST_TOP/logs/ora/10.1.2/install/make_.log

$INST_TOP/logs/ora/10.1.3/install/make_.log

$INST_TOP/admin/log/ApplyAppsTechStack.log

$INST_TOP/admin/log/ohclone.log

$APPL_TOP/admin/$CONTEXT_NAME/log/installAppl.log

$APPL_TOP/admin/$CONTEXT_NAME/log/ApplyAppltop_.log

$APPL_TOP/admin/$CONTEXT_NAME/log//adconfig.log

$APPL_TOP/admin/$CONTEXT_NAME/log//NetServiceHandler.log

Inventory Registration:

$Global Inventory/logs/cloneActions.log

$Global Inventory/logs/oraInstall.log

$Global Inventory/logs/silentInstall.log


7) Log files related to relink, network and OUI inventory for R12.1.3 are as below:

 1) Database Tier

1.1) Relink Log files :

$ORACLE_HOME/appsutil/log/$CONTEXT_NAME/MMDDHHMM/make_$MMDDHHMM.log

1.2) Alert Log Files :

$ORACLE_HOME/admin/$CONTEXT_NAME/bdump/alert_$SID.log

1.3) Network Logs :

$ORACLE_HOME/network/admin/$SID.log

1.4) OUI Logs :

OUI Inventory Logs :

$ORACLE_HOME/admin/oui/$CONTEXT_NAME/oraInventory/logs

2) Application Tier

$ORACLE_HOME/j2ee/DevSuite/log

$ORACLE_HOME/opmn/logs

$ORACLE_HOME/network/logs

Tech Stack Patch 10.1.3 (Web/HTTP Server)

$IAS_ORACLE_HOME/j2ee/forms/logs

$IAS_ORACLE_HOME/j2ee/oafm/logs

$IAS_ORACLE_HOME/j2ee/oacore/logs

$IAS_ORACLE_HOME/opmn/logs

$IAS_ORACLE_HOME/network/log

$INST_TOP/logs/ora/10.1.2

$INST_TOP/logs/ora/10.1.3

$INST_TOP/logs/appl/conc/log

$INST_TOP/logs/appl/admin/log



In EBS R12.2.4 the log file locations are as below:


1)Log files for online patching (adop) in EBS R12.2.4 are in the below location:


The adop log files are located on the non-editioned file system (fs_ne), under:


$NE_BASE/EBSapps/log/adop/<adop_session_id>/<phase>_<date>_<time>/<context_name>/log


This log directory will contain the patch logs and patch worker logs.


adop (phase=fs_clone) online patching file system cloning related log files are found under:


$INST_TOP/admin/log



2)Log files for Autoconfig process in Oracle EBS R12.2.4 are below:


On Application Tier: $INST_TOP/admin/log/<MMDDhhmm>

On Database Tier: $ORACLE_HOME/appsutil/log/<CONTEXT_NAME>/<MMDDhhmm>


3)Log files for start/stop of services from $ADMIN_SCRIPTS_HOME


In the below directory we will find log files related to the start/stop of oacore, forms, Apache, OPMN, and the WebLogic admin server/node manager:


$LOG_HOME/appl/admin/log



4)Log/Out files for Concurrent programs/managers in Oracle R12.2.4 are in below location:


Log/Out files for Oracle Release 12.2 are stored in Non-Editioned filesystem(NE).


Log files: $APPLCSF/$APPLLOG (or $NE_BASE/inst/<CONTEXT_NAME>/logs/appl/conc/log)

Out files: $APPLCSF/$APPLOUT (or $NE_BASE/inst/<CONTEXT_NAME>/logs/appl/conc/out)
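
To locate the log and out file of a particular request (a minimal sketch; :p_request_id is a placeholder bind variable), the file names are also recorded against the request itself:

SELECT request_id, logfile_name, outfile_name
FROM apps.fnd_concurrent_requests
WHERE request_id = :p_request_id;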



5)Log files for OPMN and OHS processes in Oracle R12.2.4 are in below location:


The below directory contains log files related to the OPMN process (opmn.log), OPMN debug logs (debug.log), HTTP transaction logs (access.log), and security settings related logs.


$IAS_ORACLE_HOME/instances/<ohs_instance>/diagnostics/logs



6)Log file for Weblogic Node Manager in Oracle R12.2.4 are in below location:


Log file is generated by Node Manager and contains data for all domains that 

are controlled by Node Manager on a given physical machine.


$FMW_HOME/wlserver_10.3/common/nodemanager/nmHome1/nodemanager.log



7)Log files for WebLogic in Oracle R12.2.4 for the Oracle Management Service are in the below location:


Initial settings for the AdminServer and domain-level information are written to this log file:


$EBS_DOMAIN_HOME/sysman/log



8)Log files for server processes initiated through Weblogic in Oracle R12.2.4 are in below location:

Stdout and stderr messages generated by the forms, oafm and oacore services at NOTICE severity level or higher are written by the WebLogic Node Manager to the below location:


$EBS_DOMAIN_HOME/servers/<server_name>/logs/<server_name>.out



Enjoy Oracle EBS R12.2 learning and Performing tasks.



Thanks,


Rafi.

Posted by Rafi at 9:36 AM


Labels: Apps DBA tasks, Apps R12.2 


Saturday, May 30, 2015

Difference between Oracle EBS R12.1.3 and Oracle EBS R12.2.4 

Hi,


It's been a long time since I wrote a blog post; I was busy working on Oracle EBS upgrades, content server setup, and Oracle SOA 11g upgrade projects. We have recently upgraded Oracle EBS 12.1.3 to Oracle EBS R12.2.4.


There are a lot of differences. Below are the key ones; I will try to extend this post with more differences as I notice them while working more rigorously.


1)In R12.2 we have two kinds of file systems, fs1 (run file system) and fs2 (patch file system), whereas in R12.1.3 we deal with only one application file system.


2)In R12.2 the application server is replaced by WebLogic Server to manage the technology stack.

The 10.1.3 Home is replaced by the FMW (Fusion Middleware) Home, i.e. $FMW_HOME.


The major change in R12.2 is the involvement of WebLogic Server to manage all the forms and oacore servers,

whereas in R12.1.3 we had Application Server 10.1.3 to manage the web home or Java home.


3)The adpatch (patching) utility in R12.1.3 is replaced by adop (online patching) in R12.2.4.


The adop (online patching) utility involves 5 phases to apply a standard patch in Oracle EBS R12.2:

1)prepare => the prepare phase synchronizes the file systems fs1 (run file system) and fs2 (patch file system); the file systems are interchangeable.

2)apply => in this phase we apply all the patches.

Note: these patches need to be copied to fs_ne (the non-editioned file system).

3)finalize => in this phase we get ready for the cutover phase.

4)cutover => in the cutover phase the file system switchover takes place; the file system that was previously the patch file system now becomes the run file system.

5)cleanup => in the cleanup phase the obsolete objects and editions left over from patching are removed.


The adop online patching utility doesn't require a full downtime. It involves only a minimal downtime during the cutover phase, where the switching of file systems happens. We can apply a lot of patches and do the cutover at any time to minimize downtime,


whereas with adpatch we apply patches most of the time by bringing down the applications, or in hotpatch mode.
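
A minimal sketch of a typical adop cycle (the patch number 123456 is a placeholder; run as the applications OS user from the run file system after sourcing the EBSapps environment):

adop phase=prepare
adop phase=apply patches=123456
adop phase=finalize
adop phase=cutover
adop phase=cleanup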


4)'Apps' user Password change:

The 'apps' user password change involves 3 steps in R12.2.4:

1)Change the password with FNDCPASS

2)Change the password in EBS Datasource from Weblogic admin console

3)Run Autoconfig. Check the below link from Bala for detailed steps:


http://balaoracledba.com/2014/05/15/r12-2-changing-apps-or-applsys-password-on-r12-2-instance/


Whereas in R12.1.3 it involves only 2 steps (a quick FNDCPASS sketch is shown after the steps):

1)Change the password with FNDCPASS

2)Run Autoconfig
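
A minimal sketch of the FNDCPASS step (the passwords are placeholders; it is advisable to back up the FND_USER and FND_ORACLE_USERID tables before running it):

FNDCPASS apps/<current_apps_pwd> 0 Y system/<system_pwd> SYSTEM APPLSYS <new_apps_pwd>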

5)Log files locations:

All the oacore and forms log files are placed under $EBS_DOMAIN_HOME in R12.2. I will explain this in detail in my upcoming posts.

Whereas in R12.1.3 all the logs for oacore and forms are under $LOG_HOME/ora/10.1.3.


I will add more differences to this thread so that we all get comfortable with Oracle EBS Release 12.2.4.

 Happy Oracle Apps DBA reading and learning EBS R12.2.4...


Friday, July 4, 2014

How to activate Periodic Alert Scheduler in Oracle EBS Application 


Hi Apps DBAs,


We can activate the Periodic Alert Scheduler in the Oracle EBS Application as follows.


Step 1:Login to ORACLE EBS Application HOME PAGE with Alert Manager Responsibility


We need to log in to the Oracle EBS Application home page with the Alert Manager responsibility, or else assign that responsibility to the user.


In the database we can log in with the 'apps' user and check that the user exists:


SQL> SELECT USER_NAME from fnd_user where  USER_NAME like '%RAFI%';


USER_NAME

--------------------------------------------------------------------------------

RAFI


How to check responsibilities assigned to a user:


SQL> SELECT B.RESPONSIBILITY_NAME

     FROM FND_USER_RESP_GROUPS A,

     FND_RESPONSIBILITY_VL B,

     FND_USER C

     WHERE A.responsibility_id = B.responsibility_id AND

     C.user_id = A.user_id AND

     (to_char(A.end_date) IS NULL

     OR A.end_date > sysdate)

     AND C.user_name like '%RAFI%';



RESPONSIBILITY_NAME

-------------------------------------------------------------------------------- 

TL System Administrator (Read Only)

TL System Administrator

Alert Manager


Step 2: We need to have the Alert Manager responsibility to do this task.


We need to Navigate to Alert Manager Responsibility


Navigate to Alert -> Define; then go to Request -> Schedule.


There we can see the Periodic Alert Scheduler => activate it (click on the Activate button).


Screenshot below for reference.

 





Enjoy Apps DBA tasks....



Thanks,


Rafi 






















































Posted by Rafi at 7:54 AM


Labels: Apps DBA Queries, Apps DBA tasks 


Friday, November 25, 2011

Useful Queries for Apps DBA and possible solutions 

Hi,

Apps DBAs use a lot of queries to find useful information from the database. I will try to list as many as I can in the below thread.

I will update this thread continuously so that it can help one and all.


1)How to find the E-business suite login URL?

Ans: 

SQL> conn apps

Enter password:

Connected.

SQL> select home_url from icx_parameters;


HOME_URL

--------------------------------------------------------------------------------

http://testnode1.comp.com:8000/OA_HTML/AppsLogin



2)How to find the release of Apps installed or version installed in our machine?

Ans:conn apps

Enter password:

Connected.


SQL> select release_name from fnd_product_groups;


RELEASE_NAME

--------------------------------------------------

12.1.1


3)What is Yellow Bar Warning in Apps?


Ans: Oracle Applications Release 11.5.1 (11i) requires that its code run in a trusted mode and uses J-Initiator to run Java applets on a desktop client. If an applet is “trusted,” however, Java will extend the privileges of the applet.The Yellow Warning Bar is a warning that your applet is not running in a trusted mode.To indicate that an applet is trusted, it must be digitally signed using a digital Certificate,so Oracle Applications requires that all Java archive files must be digitally signed.


4)How to check the custom tops installed?


Ans:


SQL> Select BASEPATH,PRODUCT_CODE,APPLICATION_SHORT_NAME

From fnd_application

Where application_Short_name like '%CUST_TOP_name%';


5)How to check multi-org is enabled in Oracle applications?


Ans:


SQL> select multi_org_flag from fnd_product_groups;


M

-

Y

Note:For enabling multi-org check the MY ORACLE SUPPORT notes 396351.1 and 220601.1


6)How to compile invalid objects in Oracle Applications?


Ans: Check the below link for all possible ways to compile the invalid objects in Oracle Applications. Usually the 'adadmin' utility provides us the option to do this task.


http://onlineappsdba.blogspot.com/2008/05/how-to-compile-invalid-objects-in-apps.html



7)Can we install the Apps tier and Database tier on different operating systems while installing Oracle EBS 11i/R12?

Ans: Yes, it is possible. We can do this by following the below MY ORACLE SUPPORT notes:


Oracle Apps 11i --> Using Oracle EBS with a Split Configuration Database Tier on 11gR2 [ID 946413.1]


Oracle Apps R12 --> Oracle EBS R12 with Database Tier Only Platform on Oracle Database 11.2.0 [ID 456347.1]



8)How to find the node details in Oracle Applications?

Ans: The FND_NODES table in the 'apps' schema helps in finding node details after installation, cloning and migration of applications.

SQL> SELECT NODE_NAME||' '||STATUS ||' '||NODE_ID||' '||HOST

FROM FND_NODES;


9)How to see the products installed and their versions in Oracle Applications?

Ans:


SQL> SELECT APPLICATION_ID||''||ORACLE_ID||''||PRODUCT_VERSION||''||STATUS||''||PATCH_LEVEL

FROM FND_PRODUCT_INSTALLATIONS;


O/P looks like below:


172 172 12.0.0 I R12.CCT.B.1

191 191 12.0.0 I R12.BIS.B.1

602 602 12.0.0 I R12.XLA.B.1

805 805 12.0.0 I R12.BEN.B.1

8302 800 12.0.0 I R12.PQH.B.1

8303 800 12.0.0 I R12.PQP.B.1

809 809 12.0.0 I 11i.HXC.C

662 662 12.0.0 I R12.RLM.B.1

663 663 12.0.0 I R12.VEA.B.1

298 298 12.0.0 N R12.POM.B.1

185 185 12.0.0 I R12.XTR.B.1


10)How to see the concurrent Requests and jobs in Oracle Applications?

Ans: FND_CONCURRENT_REQUESTS can be used to see the concurrent request and job details. These details are useful in troubleshooting concurrent manager related issues.


SQL>SELECT REQUEST_ID||' '||REQUEST_DATE||' '||REQUESTED_BY||' '||PHASE_CODE||' '||STATUS_CODE

FROM FND_CONCURRENT_REQUESTS;


O/P will be as given below:

REQUEST_ID||''||REQUEST_DATE||''||REQUESTED_BY||''||PHASE_CODE||''||STATUS_CODE

--------------------------------------------------------------------------------------------------------

6088454 24-NOV-11 1318 P I

6088455 24-NOV-11 1318 P Q

6088403 24-NOV-11 0 C C

6088410 24-NOV-11 0 C C



Where:


PHASE_CODE column can have values:

C Completed

I Inactive

P Pending

R Running


STATUS_CODE Column can have values:

A Waiting

B Resuming

C Normal

D Cancelled

E Error

F Scheduled

G Warning

H On Hold

I Normal

M No Manager

Q Standby

R Normal

S Suspended

T Terminating

U Disabled

W Paused

X Terminated

Z Waiting



11)What is the significance of FND_INSTALL_PROCESSES and AD_DEFERRED_JOBS tables?

Ans: The FND_INSTALL_PROCESSES and AD_DEFERRED_JOBS tables are created and dropped during 'adadmin' and 'adpatch' sessions.

Both AD utilities (adpatch/adadmin) access the same tables to store worker details, so if a session fails, both FND_INSTALL_PROCESSES and AD_DEFERRED_JOBS need to be dropped from the failed session so that the next adadmin/adpatch session can run successfully.
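
A minimal cleanup sketch (this assumes the tables are owned by APPLSYS, as is standard; verify the owner, make sure no adpatch/adadmin session is still running, and take an export/backup of the tables first):

DROP TABLE APPLSYS.FND_INSTALL_PROCESSES;
DROP TABLE APPLSYS.AD_DEFERRED_JOBS;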



Happy Apps DBA learning

Oracle Database Health check scripts 

Hi All,

A lot of times DBAs are asked to check the health of the database. The health of the database can be checked in various ways. It includes:


SL No | Monitoring Scope | Current Status (sample)

OS Level:

1 Physical memory / load : Load normal, load averages: 0.35, 0.37, 0.36

2 OS space threshold (archive, ora_dump etc.) : Sufficient space available

3 Top 10 processes consuming memory : No process using exceptionally high memory

4 Free volumes available : Sufficient disk space is available on the mount points

5 Filesystem space : Under normal threshold

Database level:

6 Check extents / proactive space addition : Space is being regularly added

7 Check alert log for ORA- and warning messages

8 Major wait events (latch/enqueue/library cache pin) : No major wait events

9 Max sessions

10 Long running jobs : 6 inactive sessions running for more than 8 hrs

11 Invalid objects : 185

12 Analyze jobs (once in a week) : Done on 20-JAN-2008 Time 06:00:06

13 Temp usage / rollback segment usage : Normal

14 Nologging indexes

15 Hotbackup/Coldbackup : Gone fine

16 Redo generation : Normal

17 PQ processes : Normal

18 I/O generation : Under normal threshold

19 2PC pending transactions : 0

DR / backup:

1 Sync arch : Normal

2 Purge arch : Normal

3 Recovery status : Normal

20)DATABASE HEALTH CHECK SCRIPT: Showing locks and Archive generation details 


In Detail DATABASE Health check:

OPERATING SYSTEM:


1)Physical memory/ Load:

1) Free:free command displays amount of total, free and used physical memory (RAM) in the system as well as showing information on shared memory, buffers, cached memory and swap space used by the Linux kernel.

Usage:

$ free -m


2) vmstat: vmstat reports virtual memory statistics, which include information about processes, swap, free, buffer and cache memory, paging space, disk I/O activity, traps, interrupts, context switches and CPU activity.

Usage:

$vmstat 5


3) top: the top command displays a dynamic real-time view of the running tasks managed by the kernel on a Linux system. The memory usage stats from the top command include the real-time live total, used and free physical memory and swap memory, with their buffer and cached memory sizes respectively.

Usage:

$top

4) ps: the ps command reports a snapshot of the currently active processes. ps will show the percentage of memory used by each process or task running in the system. With this command, top memory hogging processes can be identified.

Usage:

$ps aux


2) OS Space threshold ( archive, ora_dump etc.. ):

Check that OS space is available in all file systems, especially the locations holding the archive logs and the Oracle database files. We can use the below OS commands:

$ df -h

$ du -csh *

3) Top 10 process consuming memory:

We can display the top 10 memory consuming processes as follows:


ps aux | head -1; ps aux | sort -nrk 4 | head -10


We can use the top command, and press M which orders the process list by memory usage.


4) Free volumes available:


We have to make sure Sufficient disk space is available on the mount points on each OS servers where the Database is up and running.


$ df -h


5)Filesystem space:


Under normal threshold. Check the file systems on the OS side to confirm whether sufficient space is available at all mount points.


DATABASE :


6)Check extents / Pro active Space addition:

Check each of the data, index and temporary tablespaces for extent and block allocation details.


SET LINES 1000

SELECT SEGMENT_NAME,TABLESPACE_NAME,EXTENTS,BLOCKS

FROM DBA_SEGMENTS;


SELECT SEGMENT_NAME,TABLESPACE_NAME,EXTENTS,BLOCKS

FROM DBA_SEGMENTS WHERE TABLESPACE_NAME='STAR01D';


7) Check alert log for ORA- and warn messages:


Checking the alert log file regularly is a vital task we have to do. In the alert log file we have to look for the following things:


1) Look for any of the oracle related errors.

Open the alert log file with less or more command and search for ORA- 

This will give you the error details and time of occurrence.


2) Look for the Database level or Tablespace level changes

Monitor the alert log file and search it for the activities happening in the database each day, such as bouncing of the database, increases in the size of tablespaces, and changes in database parameters. In an 11g database we can also look for TNS errors in the alert log file.


8) Major wait events (latch/enqueue/Lib cache pin):


We can check the wait events details with the help of below queries:


SELECT s.saddr, s.SID, s.serial#, s.audsid, s.paddr, s.user#, s.username,

s.command, s.ownerid, s.taddr, s.lockwait, s.status, s.server,

s.schema#, s.schemaname, s.osuser, s.process, s.machine, s.terminal,

UPPER (s.program) program, s.TYPE, s.sql_address, s.sql_hash_value,

s.sql_id, s.sql_child_number, s.sql_exec_start, s.sql_exec_id,

s.prev_sql_addr, s.prev_hash_value, s.prev_sql_id,

s.prev_child_number, s.prev_exec_start, s.prev_exec_id,

s.plsql_entry_object_id, s.plsql_entry_subprogram_id,

s.plsql_object_id, s.plsql_subprogram_id, s.module, s.module_hash,

s.action, s.action_hash, s.client_info, s.fixed_table_sequence,

s.row_wait_obj#, s.row_wait_file#, s.row_wait_block#,

s.row_wait_row#, s.logon_time, s.last_call_et, s.pdml_enabled,

s.failover_type, s.failover_method, s.failed_over,

s.resource_consumer_group, s.pdml_status, s.pddl_status, s.pq_status,

s.current_queue_duration, s.client_identifier,

s.blocking_session_status, s.blocking_instance, s.blocking_session,

s.seq#, s.event#, s.event, s.p1text, s.p1, s.p1raw, s.p2text, s.p2,

s.p2raw, s.p3text, s.p3, s.p3raw, s.wait_class_id, s.wait_class#,

s.wait_class, s.wait_time, s.seconds_in_wait, s.state,

s.wait_time_micro, s.time_remaining_micro,

s.time_since_last_wait_micro, s.service_name, s.sql_trace,

s.sql_trace_waits, s.sql_trace_binds, s.sql_trace_plan_stats,

s.session_edition_id, s.creator_addr, s.creator_serial#

FROM v$session s

WHERE ( (s.username IS NOT NULL)

AND (NVL (s.osuser, 'x') <> 'SYSTEM')

AND (s.TYPE <> 'BACKGROUND') AND STATUS='ACTIVE'

)

ORDER BY "PROGRAM";


The following query provides clues about whether Oracle has been waiting for library cache activities:


Select sid, event, p1raw, seconds_in_wait, wait_time

From v$session_wait

Where event = 'library cache pin'

And state = 'WAITING';


The below Query gives details of Users sessions wait time and state:


SELECT NVL (s.username, '(oracle)') AS username, s.SID, s.serial#, sw.event,

sw.wait_time, sw.seconds_in_wait, sw.state

FROM v$session_wait sw, v$session s

WHERE s.SID = sw.SID

ORDER BY sw.seconds_in_wait DESC;


9) Max Sessions:

There should not be more than 6 inactive sessions running for more than 8 hours in a Database in order to minimize the consumption of CPU and I/O resources.


a)Users and Sessions CPU consumption can be obtained by below query:


Set lines 1000

select ss.username, se.SID,VALUE/100 cpu_usage_seconds

from v$session ss, v$sesstat se, v$statname sn

where se.STATISTIC# = sn.STATISTIC#

and NAME like '%CPU used by this session%'

and se.SID = ss.SID and ss.status='ACTIVE'

and ss.username is not null

order by VALUE desc;



b) Users and Sessions CPU and I/O consumption can be obtained by below query:


-- shows Day wise,User wise,Process id of server wise- CPU and I/O consumption 

set linesize 140

col spid for a6

col program for a35 trunc

select p.spid SPID,to_char(s.LOGON_TIME,'DDMonYY HH24:MI') date_login,s.username,decode(nvl(p.background,0),1,bg.description, s.program ) program,

ss.value/100 CPU,physical_reads disk_io,(trunc(sysdate,'J')-trunc(logon_time,'J')) days,

round((ss.value/100)/(decode((trunc(sysdate,'J')-trunc(logon_time,'J')),0,1,(trunc(sysdate,'J')-trunc(logon_time,'J')))),2) cpu_per_day 

from V$PROCESS p,V$SESSION s,V$SESSTAT ss,V$SESS_IO si,V$BGPROCESS bg

where s.paddr=p.addr and ss.sid=s.sid

and ss.statistic#=12 and si.sid=s.sid

and bg.paddr(+)=p.addr 

and round((ss.value/100),0) > 10 

order by 8;


10) Long running Jobs:


We can find out long running jobs with the help of the below query:


col username for a20 

col message for a50 

col remaining for 9999 

select username,to_char(start_time, 'hh24:mi:ss dd/mm/yy') started,

time_remaining remaining, message 

from v$session_longops 

where time_remaining > 0 

order by time_remaining desc;


11) Invalid objects:


We can check the invalid objects with the help of the below query:


select owner||' '||object_name||' '||created||' '||status from dba_objects where status='INVALID';
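
If invalid objects are found, one common way to recompile them (a minimal sketch; run as SYSDBA, preferably during a quiet period) is the UTL_RECOMP package or the shipped utlrp.sql script:

-- recompile invalid objects serially (UTL_RECOMP.RECOMP_PARALLEL(<n>) is the parallel variant)
EXEC UTL_RECOMP.RECOMP_SERIAL();
-- or run the shipped script:
-- @?/rdbms/admin/utlrp.sql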


12) Analyze Jobs ( once in a week ):


We need to analyze the jobs that are running once in a week as a golden rule.

The below steps can be considered for analyzing jobs.


Analyzing a Running Job 

The status of a job or a task changes several times during its life cycle. A job can have the following as its status: 

Scheduled: The job is created and will run at the specified time.

Running: The job is being executed and is in progress.

Initialization Error: The job or step could not be run successfully. If a step in a job fails initialization, the job status is Initialization Error.

Failed: The job was executed but failed.

Succeeded: The job was executed completely.

Stopped: The user canceled the job.

Stop Pending: The user has stopped the job. The already running steps are completing execution. 

Suspended: This indicates that the execution of the job is deferred.

Inactive: This status indicates that the target has been deleted.

Reassigned: The owner of the job has changed.

Skipped: The job was not executed at the specified time and has been omitted.

The running jobs can be found out by the help of below query:


select sid, job,instance from dba_jobs_running;


We can find out the failed jobs and Broken jobs details with the help of the Below query:


select job||' '||schema_user||' '||Broken||' '||failures||' '||what||' '||last_date||' '||last_sec from dba_jobs;


13) Temp usage / Rollback segment/PGA usage:


We can get information of temporary tablespace usage details with the help of below query:

Set lines 1000

SELECT b.tablespace,

ROUND(((b.blocks*p.value)/1024/1024),2)||'M' "SIZE",

a.sid||','||a.serial# SID_SERIAL,

a.username,

a.program

FROM sys.v_$session a,

sys.v_$sort_usage b,

sys.v_$parameter p

WHERE p.name = 'db_block_size'

AND a.saddr = b.session_addr

ORDER BY b.tablespace, b.blocks;


We can get information of Undo tablespace usage details with the help of the below query:

set lines 1000

SELECT TO_CHAR(s.sid)||','||TO_CHAR(s.serial#) sid_serial,

NVL(s.username, 'None') orauser,

s.program,

r.name undoseg,

t.used_ublk * TO_NUMBER(x.value)/1024||'K' "Undo"

FROM sys.v_$rollname r,

sys.v_$session s,

sys.v_$transaction t,

sys.v_$parameter x

WHERE s.taddr = t.addr

AND r.usn = t.xidusn(+)

AND x.name = 'db_block_size';


We can get the PGA usage details with the help of the below query:

select st.sid "SID", sn.name "TYPE",

ceil(st.value / 1024 / 1024/1024) "GB" 

from v$sesstat st, v$statname sn 

where st.statistic# = sn.statistic# 

and sid in 

(select sid from v$session where username like UPPER('&user'))

and upper(sn.name) like '%PGA%' 

order by st.sid, st.value desc;

Enter value for user: STARTXNAPP

14)Validating the Backup:


We have to verify that the hot backup/cold backup (or any physical or logical backup) of all the production and non-production databases went fine. Make sure you have valid backups of all the databases. Check the backup locations to make sure the backup completed on time with the required backup data.


14)Hotbackup/Coldbackup:

Validate the backup of the database. It should complete on time with the data required for restore and recovery purposes if needed.


15) Redo generation/Archive logs generation details:

We should make sure there are not too frequent log switches happening in a database. If there are frequent log switches, more archive logs are generated, which may decrease the performance of the database; in a production database, log switches could vary between 5 and 20 depending upon the server configuration.


We can check the log switch details with the help of the below query:


Redolog switch Datewise and hourwise:

-------------------------------

set lines 120; 

set pages 999;

select to_char(first_time,'DD-MON-RR') "Date",

to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",

to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",

to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",

to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",

to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",

to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",

to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",

to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",

to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",

to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",

to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",

to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",

to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",

to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",

to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",

to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",

to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",

to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",

to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",

to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",

to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",

to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",

to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",

to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"

from v$log_history

group by to_char(first_time,'DD-MON-RR')

order by 1

/

Archive log generation is directly proportional to the number of log switches happening in a database. If there are frequent log switches, more archive logs are generated, which can affect the performance of the database.



We can use the below queries for archive logs generation details:


a)Archive logs by dates:

set lines 1000

select to_char(first_time,'DD-MON-RR') "Date",

to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",

to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",

to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",

to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",

to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",

to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",

to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",

to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",

to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",

to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",

to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",

to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",

to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",

to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",

to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",

to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",

to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",

to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",

to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",

to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",

to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",

to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",

to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",

to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"

from v$log_history 

group by to_char(first_time,'DD-MON-RR')

order by 1

/

b)Archive log generation details Day-wise :


select to_char(COMPLETION_TIME,'DD-MON-YYYY'),count(*) 

from v$archived_log group by to_char(COMPLETION_TIME,'DD-MON-YYYY') 

order by to_char(COMPLETION_TIME,'DD-MON-YYYY');


c) Archive log count of the day:


select count(*)

from v$archived_log

where trunc(completion_time)=trunc(sysdate); 


count of archived logs generated today on hourly basis:

-------------------------------------------------------

select to_char(first_time,'DD-MON-RR') "Date",

to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",

to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",

to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",

to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",

to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",

to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",

to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",

to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",

to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",

to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",

to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",

to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",

to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",

to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",

to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",

to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",

to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",

to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",

to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",

to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",

to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",

to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",

to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",

to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"

from v$log_history

where to_char(first_time,'DD-MON-RR')='16-AUG-10'

group by to_char(first_time,'DD-MON-RR')

order by 1

/


16)I/O Generation:

We can find out CPU and I/O generation details for all the users in the Database with the help of the below query:

-- Show IO per session,CPU in seconds, sessionIOS.

set linesize 140

col spid for a6

col program for a35 trunc

select p.spid SPID,to_char(s.LOGON_TIME,'DDMonYY HH24:MI') date_login,s.username,decode(nvl(p.background,0),1,bg.description, s.program ) program,

ss.value/100 CPU,physical_reads disk_io,(trunc(sysdate,'J')-trunc(logon_time,'J')) days,

round((ss.value/100)/(decode((trunc(sysdate,'J')-trunc(logon_time,'J')),0,1,(trunc(sysdate,'J')-trunc(logon_time,'J')))),2) cpu_per_day 

from V$PROCESS p,V$SESSION s,V$SESSTAT ss,V$SESS_IO si,V$BGPROCESS bg

where s.paddr=p.addr and ss.sid=s.sid

and ss.statistic#=12 and si.sid=s.sid

and bg.paddr(+)=p.addr 

and round((ss.value/100),0) > 10 

order by 8;

To know what the session is doing and what kind of sql it is using:


-- what kind of sql a session is using

set lines 9999

set pages 9999


select s.sid, q.sql_text from v$sqltext q, v$session s

where q.address = s.sql_address

and s.sid = &sid order by piece;


eg: sid=1853


17)Sync arch:

In a Data Guard environment we have to check that the primary is in sync with the standby database. We can check this as follows:

The V$MANAGED_STANDBY view on the standby database site shows you the activities performed by both the redo transport and Redo Apply processes in a Data Guard environment:

SELECT PROCESS, CLIENT_PROCESS, SEQUENCE#, STATUS FROM V$MANAGED_STANDBY;

In some situations, automatic gap recovery may not take place and you will need to perform gap recovery manually. For example, you will need to perform gap recovery manually if you are using logical standby databases and the primary database is not available.

The following sections describe how to query the appropriate views to determine which log files are missing and perform manual recovery.

On a physical standby database

To determine if there is an archive gap on your physical standby database, query the V$ARCHIVE_GAP view as shown in the following example:

SQL> SELECT * FROM V$ARCHIVE_GAP;


If it displays no rows, then the primary database is in sync with the standby database. If it displays any rows, then we have to apply the missing archive logs manually.


After you identify the gap, issue the following SQL statement on the primary database to locate the archived redo log files on your primary database (assuming the local archive destination on the primary database is LOG_ARCHIVE_DEST_1):

Eg:

SELECT NAME FROM V$ARCHIVED_LOG WHERE THREAD#=1 AND DEST_ID=1 AND SEQUENCE# BETWEEN 7 AND 10;

Copy these log files to your physical standby database and register them using the ALTER DATABASE REGISTER LOGFILE statement on your physical standby database. For example:

SQL> ALTER DATABASE REGISTER LOGFILE

'/physical_standby1/thread1_dest/arcr_1_7.arc';

SQL> ALTER DATABASE REGISTER LOGFILE

'/physical_standby1/thread1_dest/arcr_1_8.arc';


After you register these log files on the physical standby database, you can restart Redo Apply. The V$ARCHIVE_GAP fixed view on a physical standby database only returns the next gap that is currently blocking Redo Apply from continuing. After resolving the gap and starting Redo Apply, query the V$ARCHIVE_GAP fixed view again on the physical standby database to determine the next gap sequence, if there is one. Repeat this process until there are no more gaps.


On a logical standby database:

To determine if there is an archive gap, query the DBA_LOGSTDBY_LOG view on the logical standby database. For example, the following query indicates there is a gap in the sequence of archived redo log files because it displays two files for THREAD 1 on the logical standby database. (If there are no gaps, the query will show only one file for each thread.) The output shows that the highest registered file is sequence number 10, but there is a gap at the file shown as sequence number 6:

SQL> COLUMN FILE_NAME FORMAT a55

SQL> SELECT THREAD#, SEQUENCE#, FILE_NAME FROM DBA_LOGSTDBY_LOG L

2> WHERE NEXT_CHANGE# NOT IN

3> (SELECT FIRST_CHANGE# FROM DBA_LOGSTDBY_LOG WHERE L.THREAD# = THREAD#)

4> ORDER BY THREAD#,SEQUENCE#;


THREAD# SEQUENCE# FILE_NAME

---------- ---------- -----------------------------------------------

1 6 /disk1/oracle/dbs/log-1292880008_6.arc

1 10 /disk1/oracle/dbs/log-1292880008_10.arc


Copy the missing log files, with sequence numbers 7, 8, and 9, to the logical standby system and register them using the ALTER DATABASE REGISTER LOGICAL LOGFILE statement on your logical standby database. For example:

SQL> ALTER DATABASE REGISTER LOGICAL LOGFILE '/disk1/oracle/dbs/log-1292880008_10.arc';


After you register these log files on the logical standby database, you can restart SQL Apply.


The DBA_LOGSTDBY_LOG view on a logical standby database only returns the next gap that is currently blocking SQL Apply from continuing. After resolving the identified gap and starting SQL Apply, query the DBA_LOGSTDBY_LOG view again on the logical standby database to determine the next gap sequence, if there is one. Repeat this process until there are no more gaps.

Monitoring Log File Archival Information:

Step 1 Determine the current archived redo log file sequence numbers.

Enter the following query on the primary database to determine the current archived redo log file sequence numbers:

SQL> SELECT THREAD#, SEQUENCE#, ARCHIVED, STATUS FROM V$LOG

WHERE STATUS='CURRENT';

Step 2 Determine the most recent archived redo log file.

Enter the following query at the primary database to determine which archived redo log file contains the most recently transmitted redo data:

SQL> SELECT MAX(SEQUENCE#), THREAD# FROM V$ARCHIVED_LOG GROUP BY THREAD#;

Step 3 Determine the most recent archived redo log file at each destination.

Enter the following query at the primary database to determine which archived redo log file was most recently transmitted to each of the archiving destinations:

SQL> SELECT DESTINATION, STATUS, ARCHIVED_THREAD#, ARCHIVED_SEQ#

2> FROM V$ARCHIVE_DEST_STATUS

3> WHERE STATUS <> 'DEFERRED' AND STATUS <> 'INACTIVE';


DESTINATION STATUS ARCHIVED_THREAD# ARCHIVED_SEQ#

------------------ ------ ---------------- -------------

/private1/prmy/lad VALID 1 947

standby1 VALID 1 947

The most recently written archived redo log file should be the same for each archive destination listed. If it is not, a status other than VALID might identify an error encountered during the archival operation to that destination.

Step 4 Find out if archived redo log files have been received.

You can issue a query at the primary database to find out if an archived redo log file was not received at a particular site. Each destination has an ID number associated with it. You can query the DEST_ID column of the V$ARCHIVE_DEST fixed view on the primary database to identify each destination's ID number.

Assume the current local destination is 1, and one of the remote standby destination IDs is 2. To identify which log files are missing at the standby destination, issue the following query:

SQL> SELECT LOCAL.THREAD#, LOCAL.SEQUENCE# FROM

2> (SELECT THREAD#, SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=1)

3> LOCAL WHERE

4> LOCAL.SEQUENCE# NOT IN

5> (SELECT SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=2 AND

6> THREAD# = LOCAL.THREAD#);


THREAD# SEQUENCE#

--------- ---------

1 12

1 13

1 14


18)Purge arch:

We have to make sure the archive log files are purged safely or moved to a tape drive or another location in order to make space for new archive log files in the archive log destination locations.
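
A minimal RMAN sketch for purging archive logs that are no longer needed (the 7-day window is only an example; align it with your backup policy and, in Data Guard setups, with what has already been applied on the standby):

RMAN> CROSSCHECK ARCHIVELOG ALL;
RMAN> DELETE NOPROMPT ARCHIVELOG ALL COMPLETED BEFORE 'SYSDATE-7';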


19)Recovery status:

In order to do recovery, make sure you have the latest archive logs so that you can restore and recover if required.
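
A minimal RMAN sketch to confirm that the existing backups are actually restorable, without restoring anything (assumes an RMAN backup strategy is in place):

RMAN> RESTORE DATABASE VALIDATE;
RMAN> RESTORE ARCHIVELOG FROM TIME 'SYSDATE-1' VALIDATE;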



20) MY DATABASE HEALTH CHECK SCRIPT:

/* SCRIPT FOR MONITORING AND CHECKING HEALTH OF DATABASE-USEFUL FOR PRODUCTION DATABASES */


-- SHOWS RUNNING JOBS

select 'RUNNING JOBS', sid, job,instance from dba_jobs_running;

set lines 1000

-- SHOWS ARCHIVE LOGS GENERAION DETAILS HOURLY AND DATE WISE BASIS

select 'ARCHIVE LOG REPORT',to_char(first_time,'DD-MON-RR') "Date",

to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'99') " 00",

to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'99') " 01",

to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'99') " 02",

to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'99') " 03",

to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'99') " 04",

to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'99') " 05",

to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'99') " 06",

to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'99') " 07",

to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'99') " 08",

to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'99') " 09",

to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'99') " 10",

to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'99') " 11",

to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'99') " 12",

to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'99') " 13",

to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'99') " 14",

to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'99') " 15",

to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'99') " 16",

to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'99') " 17",

to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'99') " 18",

to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'99') " 19",

to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'99') " 20",

to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'99') " 21",

to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'99') " 22",

to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'99') " 23"

from v$log_history

group by to_char(first_time,'DD-MON-RR')

order by 1

/

-- WHAT ALL THE SESSIONS ARE GETTING BLOCKED

select 'SESSIONS BLOCKED',process,sid, blocking_session from v$session where blocking_session is not null;

-- WHICH SESSION IS BLOCKING WHICH SESSION

set lines 9999

set pages 9999

select s1.username || '@' || s1.machine

|| ' ( SID=' || s1.sid || ' ) is blocking '

|| s2.username || '@' || s2.machine || ' ( SID=' || s2.sid || ' ) ' AS blocking_status

from v$lock l1, v$session s1, v$lock l2, v$session s2

where s1.sid=l1.sid and s2.sid=l2.sid

and l1.BLOCK=1 and l2.request > 0

and l1.id1 = l2.id1

and l1.id2 = l2.id2 ;

-- SHOWS BLOCK CHANGES DETAILS AND PHYSICAL READS DETAIL

select a.sid,b.username,block_gets,consistent_gets,physical_reads,block_changes

from V$SESS_IO a,V$SESSION b

where a.sid=b.sid and block_changes > 10000 order by block_changes desc;

-- show IO per session / CPU in seconds. sessionIOS.sql

set linesize 140

col spid for a6

col program for a35 trunc

select p.spid SPID,to_char(s.LOGON_TIME,'DDMonYY HH24:MI') date_login,s.username,decode(nvl(p.background,0),1,bg.description, s.program ) program,ss.value/100 CPU,physical_reads disk_io,(trunc(sysdate,'J')-trunc(logon_time,'J')) days,round((ss.value/100)/(decode((trunc(sysdate,'J')-trunc(logon_time,'J')),0,1,(trunc(sysdate,'J')-trunc(logon_time,'J')))),2) cpu_per_day

from V$PROCESS p,V$SESSION s,V$SESSTAT ss,V$SESS_IO si,V$BGPROCESS bg

where s.paddr=p.addr and ss.sid=s.sid

and ss.statistic#=12 and si.sid=s.sid

and bg.paddr(+)=p.addr

and round((ss.value/100),0) > 10

order by 8;

-- SCRIPT TO IDENTIFY LONG RUNNING STATEMENTS

rem LONGOPS.SQL

rem Long Running Statements

rem Helmut Pfau, Oracle Deutschland GmbH

set linesize 120

col opname format a20

col target format a15

col units format a10

col time_remaining format 99990 heading Remaining[s]

col bps format 9990.99 heading [Units/s]

col fertig format 90.99 heading "complete[%]"

select sid,

opname,

target,

sofar,

totalwork,

units,

(totalwork-sofar)/time_remaining bps,

time_remaining,

sofar/totalwork*100 fertig

from v$session_longops

where time_remaining > 0

/

-- ACTIVE SESSIONS IN DATABASE

select 'ACTIVE SESSION', sid, serial#, machine, osuser, username, status from v$session where username is not null and status='ACTIVE';

-- WHAT SQL A SESSION IS USING

set lines 9999

set pages 9999

select s.sid, q.sql_text from v$sqltext q, v$session s

where q.address = s.sql_address

and s.sid = &sid order by piece;


eg:SID=1844

I would like to add one more script which gives details regarding the size of the database (used, occupied and available), tablespace usage details, and the hit ratios of various SGA components, which can be very helpful for monitoring the performance of the databases.


Database_monitor.sql:


ttitle "1. :============== Tablespace Usage Information ==================:" skip 2 

set linesize 140

col Total format 99999.99 heading "Total space(MB)"

col Used format 99999.99 heading "Used space(MB)"

col Free format 99999.99 heading "Free space(MB)"

break on report

compute sum of Total on report

compute sum of Used on report

compute sum of Free on report

select a.tablespace_name, round(a.bytes/1024/1024,2) Total, 

round( nvl( b.bytes,0)/1024/1024,2) Used,

round(nvl(c.bytes, 0)/1024/1024,2) Free , 

round(nvl(b.bytes,0)*100/nvl(a.bytes,0),2) "% Used"

from sys.sm$ts_avail a, sys.sm$ts_used b, sys.sm$ts_free c 

where a.tablespace_name=b.tablespace_name(+) 

and b.tablespace_name=c.tablespace_name(+);


ttitle "2. :============== Hit Ratio Information ==================:" skip 2 

set linesize 80

clear columns

clear breaks

set pagesize 60 heading off termout off echo off verify off

REM

col val1 new_val lib noprint

select 100*(1-(SUM(Reloads)/SUM(Pins))) val1

from V$LIBRARYCACHE;


ttitle off

col val2 new_val dict noprint

select 100*(1-(SUM(Getmisses)/SUM(Gets))) val2

from V$ROWCACHE;


ttitle off

col val3 new_val phys_reads noprint

select Value val3

from V$SYSSTAT

where Name = 'physical reads';


ttitle off

col val4 new_val log1_reads noprint

select Value val4

from V$SYSSTAT

where Name = 'db block gets';


ttitle off

col val5 new_val log2_reads noprint

select Value val5

from V$SYSSTAT

where Name = 'consistent gets';


ttitle off

col val6 new_val chr noprint

select 100*(1-(&phys_reads / (&log1_reads + &log2_reads))) val6

from DUAL;


ttitle off

col val7 new_val avg_users_cursor noprint

col val8 new_val avg_stmts_exe noprint

select SUM(Users_Opening)/COUNT(*) val7,

SUM(Executions)/COUNT(*) val8

from V$SQLAREA;

ttitle off

set termout on

set heading off

ttitle -

center 'SGA Cache Hit Ratios' skip 2


select 'Data Block Buffer Hit Ratio : '||&chr db_hit_ratio,

' Shared SQL Pool ',

' Dictionary Hit Ratio : '||&dict dict_hit,

' Shared SQL Buffers (Library Cache) ',

' Cache Hit Ratio : '||&lib lib_hit,

' Avg. Users/Stmt : '||

&avg_users_cursor||' ',

' Avg. Executes/Stmt : '||

&avg_stmts_exe||' '

from DUAL;


ttitle "3. :============== Sort Information ==================:" skip 2 


select A.Value Disk_Sorts,

B.Value Memory_Sorts,

ROUND(100*A.Value/

DECODE((A.Value+B.Value),0,1,(A.Value+B.Value)),2)

Pct_Disk_Sorts

from V$SYSSTAT A, V$SYSSTAT B

where A.Name = 'sorts (disk)'

and B.Name = 'sorts (memory)';


ttitle "4. :============== Database Size Information ==================:" skip 2 



select sum(bytes/1024/1024/1024) Avail from sm$ts_avail union all select sum(bytes/1024/1024/1024) Used from sm$ts_used union all select sum(bytes/1024/1024/1024) Free from sm$ts_free;

Killing multiple sessions in Oracle Database 

Hi,


We often get tasks to kill sessions that are active, inactive, or related to a particular program. When there are only one or two sessions we can kill them easily; when there are many, we can do it as follows:



Step 1:Check the Name of the Database

sqlplus "/as sysdba"


SQL>Select name from v$database;


Step 2:How to kill all inactive session in Database


sqlplus "/as sysdba"


SQL>set heading off


SQL>spool kill12.sql


SELECT 'ALTER SYSTEM KILL SESSION '||''''||sid ||','|| serial#||''''||' immediate;'

 FROM v$session

 WHERE status ='INACTIVE' and type != 'BACKGROUND';


SQL>spool off


Then execute the generated SQL script:


SQL> @kill12.sql


Note: For a RAC database with more than one instance, repeat this step on each instance.
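
Alternatively, on 11g and later you can generate the kill statements for all RAC instances in a single pass (a sketch using gv$session and the 'sid,serial#,@inst_id' form of KILL SESSION):

SELECT 'ALTER SYSTEM KILL SESSION '''||sid||','||serial#||',@'||inst_id||''' immediate;'
FROM gv$session
WHERE status ='INACTIVE' and type != 'BACKGROUND';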


How to kill all active session in Database:

sqlplus "/as sysdba"


SQL>set heading off

SQL>spool kill_active.sql


SELECT 'ALTER SYSTEM KILL SESSION '||''''||sid ||','|| serial#||''''||' immediate;'

FROM v$session

WHERE status ='ACTIVE' and type != 'BACKGROUND';


SQL>spool off


Note: For a RAC database with more than one instance, repeat the above steps on each instance.



How to kill all ODI sessions in Database:

sqlplus "/as sysdba"


SQL>set heading off

SQL>set lines 1000

SQL>set linesize 2000

SQL>spool kill_active.sql


SELECT 'ALTER SYSTEM KILL SESSION '||''''||sid ||','|| serial#||''''||' immediate;'

FROM v$session

WHERE status ='ACTIVE' AND USERNAME LIKE '%ODI%';


SQL>spool off



Note: For a RAC database with more than one instance, repeat the above steps on each instance.



How to get the list of Users and Processes running ODI sessions:


SQL> SET LINESIZE 100

COLUMN spid FORMAT A10

COLUMN username FORMAT A10

COLUMN program FORMAT A45


SELECT s.inst_id,s.sid,s.serial#,p.spid, s.username, s.program

 FROM   gv$session s

 JOIN gv$process p ON p.addr = s.paddr AND p.inst_id = s.inst_id

WHERE  s.type != 'BACKGROUND' AND S.USERNAME LIKE '%ODI%';  


How to kill a session that is blocking others by locking a particular object:


 1.Find the tables(objects) which are locked:


SELECT o.owner, o.object_name, o.object_type, o.last_ddl_time, o.status, l.session_id, l.oracle_username, l.locked_mode

FROM dba_objects o, gv$locked_object l

WHERE o.object_id = l.object_id and o.object_name='XX_OBJECT';


2.Killing the session holding the lock:


--Find the serial# for the sessions holding the lock:


SQL> select SERIAL# from v$session where SID=667;


SERIAL#

----------

21091


SQL> alter system kill session '667,21091';


Note: For a RAC database with more than one instance, repeat step 2 on each instance.
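
On RAC it can also help (a sketch) to find the sid, serial# and inst_id of the sessions locking the object in a single query:

SELECT s.inst_id, s.sid, s.serial#, s.username, o.object_name
FROM gv$session s, gv$locked_object l, dba_objects o
WHERE s.sid = l.session_id
AND s.inst_id = l.inst_id
AND l.object_id = o.object_id
AND o.object_name = 'XX_OBJECT';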


Enjoy DBA tasks...


Happy DBA learning..


Comparing two schemas 

Hi,

We can compare two schemas with the help of tools like TOAD. But what if you don't have such a tool? We can do it with the help of the queries below:


/* Query 1:To compare the two users in terms of tables: */


SELECT TABLE_NAME FROM ALL_TABLES

WHERE TABLE_NAME NOT IN

(SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER='EMP1')

AND OWNER='EMP2';


/* Query 2:To compare the two users in terms of objects */


SELECT OBJECT_NAME||' '||OBJECT_TYPE FROM ALL_OBJECTS

WHERE OBJECT_NAME NOT IN

(SELECT OBJECT_NAME FROM ALL_OBJECTS WHERE OWNER='EMP1')

AND OWNER='EMP2';
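
A further check (a sketch, reusing the EMP1/EMP2 example owners) compares the two users at column level:

/* Query 3: To compare the two users in terms of table columns */

SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE
FROM ALL_TAB_COLUMNS WHERE OWNER='EMP2'
MINUS
SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE
FROM ALL_TAB_COLUMNS WHERE OWNER='EMP1';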


The above queries are very helpful when we transfer data from one schema to another and want to verify the objects after a successful transfer, i.e. after Data Pump or export/import activities. Hope it helps.



Best regards,


Rafi. 


Thursday, August 5, 2010

Useful Tips and Queries for Oracle DBA. 

Below are some useful tips regarding 'TEMPORARY TABLESPACE' :


Steps for dropping the default temporary tablespace:

-----------------------------------------------

Step 1: Create a new temporary tablespace (drop1temp) as below and make it the default.

-------


create temporary tablespace drop1temp tempfile '/star/oradata/STARDEV/droptemp.ora' size 1000m;

ALTER DATABASE DEFAULT TEMPORARY TABLESPACE drop1temp;
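
Before step 2, it is safer (a sketch) to confirm that no session is still using a sort segment in the old temporary tablespace:

SELECT s.sid, s.serial#, s.username, u.tablespace
FROM v$session s, v$sort_usage u
WHERE s.saddr = u.session_addr
AND u.tablespace = 'STA01T';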



Step 2: Drop the original default temporary tablespace (e.g. STA01T):

-----


DROP TABLESPACE STA01T INCLUDING CONTENTS AND DATAFILES;


* SOMETHING A BIT TRICKY *


Command for creating New temporary tablespace:

-----------------------------------------------------


CREATE TEMPORARY TABLESPACE STA01T TEMPFILE '/stage/oradata/STARTST/STA01.dbf' SIZE 6000M;


Increasing the size of the temporary tablespace:

-------------------------------------------------------


ORA-01652: unable to extend temp segment by 128 in tablespace STA01T



STEPS FOR ADDING TEMP FILE IN A TEMPORARY TABLESPACE:

---------------------------------------------


Step 1:Add the temporary file.

--------

ALTER TABLESPACE STA01T ADD TEMPFILE '/stage/oradata/STARTST/star01t.dbf' SIZE 5000M;


Step 2: Shut down cleanly and start the database for the changes to take effect.

--------

SQL>Shut immediate;

SQL>startup


Note:Restart the database for changes to take effect...


Step 3:Check the filesystem and space now:

----------------------------------------------

-- The below query can be used for checking datafile size and tempfile sizes.


select file_name||' '||tablespace_name||' '||(bytes)/1024/1024 from dba_data_files;


select file_name||' '||tablespace_name||' '||(bytes)/1024/1024 from dba_temp_files;


We can see a tempfile of size 5000M in our Database.
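
Instead of adding a second tempfile, the existing tempfile can also be resized or put on autoextend; a sketch reusing the file name from above:

ALTER DATABASE TEMPFILE '/stage/oradata/STARTST/star01t.dbf' RESIZE 8000M;

ALTER DATABASE TEMPFILE '/stage/oradata/STARTST/star01t.dbf' AUTOEXTEND ON NEXT 100M MAXSIZE 10000M;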


Below are some useful Queries for Oracle DBA:

-------------------------------------------------------------



The below query can be used in Oracle 11g only to know details of all temporary tablespaces:

-------------------------------------

select tablespace_name,

tablespace_size/1024/1024 "Total Space",

allocated_space/1024/1024 "Alloc Space",

free_space/1024/1024 "Free Space"

from dba_temp_free_space;


SELECT tablespace_name, SUM(bytes_used)/1024/1024, SUM(bytes_free)/1024/1024

FROM V$temp_space_header

GROUP BY tablespace_name; 



To know Default temporary tablespace:

---------------------------------------

SELECT * FROM DATABASE_PROPERTIES where PROPERTY_NAME='DEFAULT_TEMP_TABLESPACE';


-- The below query can be used to check the default tablespace and default temporary tablespace for the database.


SELECT * FROM DATABASE_PROPERTIES where PROPERTY_NAME like '%DEFAULT%';


To know Default tablespace & temporary tablespace for user:

------------------------------------------------------------


select username,temporary_tablespace,default_tablespace from dba_users where username='STARREP';


To know the tablespace sizes, query the below:

-----------------------------------------


SQL>set linesize 1000


SELECT tablespace_name, ROUND(SUM(total_mb)-SUM(free_mb)) CUR_USE_MB, ROUND(SUM(total_mb)) CUR_SZ_MB,

ROUND((SUM(total_mb)-SUM(free_mb))/SUM(total_mb)*100) CUR_PCT_FULL, ROUND(SUM(max_mb) - (SUM(total_mb)-SUM(free_mb))) FREE_SPACE_MB,

ROUND(SUM(max_mb)) MAX_SZ_MB, ROUND((SUM(total_mb)-SUM(free_mb))/SUM(max_mb)*100) PCT_FULL

FROM (

SELECT tablespace_name, SUM(bytes)/1024/1024 FREE_MB,

0 TOTAL_MB, 0 MAX_MB

FROM dba_free_space

GROUP BY tablespace_name

UNION

SELECT tablespace_name, 0 CURRENT_MB,

SUM(bytes)/1024/1024 TOTAL_MB,

SUM(DECODE(maxbytes,0,bytes, maxbytes))/1024/1024 MAX_MB

FROM dba_data_files

GROUP BY tablespace_name)

GROUP BY tablespace_name;


tablespace usage:

=================


SELECT F.TABLESPACE_NAME,TO_CHAR ((T.TOTAL_SPACE - F.FREE_SPACE),'999,999') "USEDMB",

TO_CHAR (F.FREE_SPACE, '999,999') "FREEMB",

TO_CHAR (T.TOTAL_SPACE, '999,999') "TOTALMB",

TO_CHAR ((ROUND ((F.FREE_SPACE/T.TOTAL_SPACE)*100)),'999')||' %' FREE

FROM (SELECT TABLESPACE_NAME, 

ROUND (SUM (BLOCKS*(SELECT VALUE/1024

FROM V$PARAMETER 

WHERE NAME = 'db_block_size')/1024) ) FREE_SPACE

FROM DBA_FREE_SPACE

GROUP BY TABLESPACE_NAME ) F,

(

SELECT TABLESPACE_NAME,

ROUND (SUM (BYTES/1048576)) TOTAL_SPACE

FROM DBA_DATA_FILES

GROUP BY TABLESPACE_NAME ) T

WHERE F.TABLESPACE_NAME = T.TABLESPACE_NAME;


Resizing Tablespace without adding datafile:

--------------------


ALTER DATABASE DATAFILE '/work/oradata/STARTST/STAR02D.dbf' resize 2000M;


Checking autoextend on/off for Tablespaces:

select substr(file_name,1,50), AUTOEXTENSIBLE from dba_data_files;

(OR)

SQL> select tablespace_name,AUTOEXTENSIBLE from dba_data_files;



Adding datafile to a tablespace:

alter tablespace star02D add datafile '/work/oradata/STARTST/sta05d.dbf' size 1000M autoextend off;


Increasing Datafile size:

-------------------------


Alter Database datafile '/u01/app/Test1_data_01.dbf' resize 2G;
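
To avoid repeated manual resizes, autoextend can also be enabled on the datafile (a sketch, reusing the same example file):

Alter Database datafile '/u01/app/Test1_data_01.dbf' autoextend on next 100M maxsize 4G;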



Important:

----------------


Checking the default tablespace and default temporary tablespace for all users:


---------------------------------------------------------------------------


SQL>set linesize 1000


SQL> select default_tablespace,temporary_tablespace,username from dba_users;



undo usage details:

===========


SELECT TO_CHAR(s.sid)||','||TO_CHAR(s.serial#) sid_serial,

NVL(s.username, 'None') orauser,

s.program,

r.name undoseg,

t.used_ublk * TO_NUMBER(x.value)/1024||'K' "Undo"

FROM sys.v_$rollname r,

sys.v_$session s,

sys.v_$transaction t,

sys.v_$parameter x

WHERE s.taddr = t.addr

AND r.usn = t.xidusn(+)

AND x.name = 'db_block_size';


SID_SERIAL ORAUSER PROGRAM UNDOSEG Undo

---------- ---------- ------------------------------ --------------- -------

260,7 SCOTT sqlplus@localhost.localdomain _SYSSMU4$ 8K

(TNS V1-V3)



who is using a TEMP Segment: 

============================


SELECT b.tablespace,

ROUND(((b.blocks*p.value)/1024/1024),2)||'M' "SIZE",

a.sid||','||a.serial# SID_SERIAL,

a.username,

a.program

FROM sys.v_$session a,

sys.v_$sort_usage b,

sys.v_$parameter p

WHERE p.name = 'db_block_size'

AND a.saddr = b.session_addr

ORDER BY b.tablespace, b.blocks; 



PGA USAGE:

==========

select st.sid "SID", sn.name "TYPE",

ceil(st.value / 1024 / 1024/1024) "GB" 

from v$sesstat st, v$statname sn 

where st.statistic# = sn.statistic# 

and sid in 

(select sid from v$session where username like UPPER('&user'))

and upper(sn.name) like '%PGA%' 

order by st.sid, st.value desc;
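
Instance-wide PGA figures (a sketch) can also be read from v$pgastat:

select name, round(value/1024/1024) MB
from v$pgastat
where name in ('total PGA allocated','total PGA inuse','maximum PGA allocated');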



long job:

=========


col username for a20 

col message for a50 

col remaining for 9999 

select username,to_char(start_time, 'hh24:mi:ss dd/mm/yy') started,

time_remaining remaining, message 

from v$session_longops 

where time_remaining = 0 

order by time_remaining desc;



CPU usage of the USER:

======================


select 

ss.username,

se.SID,

VALUE/100 cpu_usage_seconds

from

v$session ss, 

v$sesstat se, 

v$statname sn

where

se.STATISTIC# = sn.STATISTIC#

and

NAME like '%CPU used by this session%'

and

se.SID = ss.SID

and 

ss.status='ACTIVE'

and 

ss.username is not null

order by VALUE desc;


Running Jobs:

=============


select owner, job_name from DBA_SCHEDULER_RUNNING_JOBS;


select sid, job,instance from dba_jobs_running;


select sid, serial#, machine, status, osuser, username from v$session where username is not null;


How to find the Actual size of a Database?

select sum(bytes)/1024/1024/1024 as GB from dba_data_files;


Eg:

SQL> select sum(bytes)/1024/1024/1024 as GB from dba_data_files;


GB

----------

30.9667969


How to find the size occupied by Data in a Database or Database usage details?

select sum(bytes)/1024/1024/1024 as GB from dba_segments;

Eg:

SQL> select sum(bytes)/1024/1024/1024 as GB from dba_segments;


GB

----------

10.0961914

So, in the above example the database size is about 30 GB and the database usage is about 10 GB.
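
The dba_data_files figure counts only datafiles; a fuller sketch of the total footprint (on 10g and later) also includes tempfiles, redo logs and controlfiles:

select round(sum(gb),2) as total_gb from (
select sum(bytes)/1024/1024/1024 gb from dba_data_files
union all
select sum(bytes)/1024/1024/1024 from dba_temp_files
union all
select sum(bytes*members)/1024/1024/1024 from v$log
union all
select sum(block_size*file_size_blks)/1024/1024/1024 from v$controlfile
);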


How to find the size of the SCHEMA/USER?

We can find the size of the schema/user by query:


select sum(bytes/1024/1024)"size" from dba_segments where

owner='&owner';


Eg:SQL> select sum(bytes/1024/1024)"size" from dba_segments 

where owner='TEST_DEV';


size

----------

2.8125

So, in the above example the space occupied by user 'TEST_DEV' is about 2.8 MB.


How can a USER be granted privilege for executing a particular PACKAGE?


SQL> grant execute on DBMS_CRYPTO to TEST_USER;


Grant succeeded.


SQL> grant execute on DBMS_RANDOM to TEST_USER;


Grant succeeded.


SQL> grant execute on DBMS_UTILITY to TEST_USER;


Grant succeeded.


Here DBMS_CRYPTO, DBMS_RANDOM and DBMS_UTILITY are the packages on which the execute privilege was granted.

Note: For security reasons, a DBA should not grant execute privileges on all packages.
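
To verify afterwards what the user has been granted (a sketch):

SQL> select grantee, table_name, privilege from dba_tab_privs where grantee='TEST_USER' and privilege='EXECUTE';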



How to see the parameters set in our Oracle Database?


SQL> select sid,name,value from v$spparameter where isspecified='TRUE';
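
Similarly (a sketch), to list only the parameters that have been changed from their defaults in the running instance:

SQL> select name, value from v$parameter where isdefault='FALSE';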



Happy Oracle DBA learning...


Best regards,


Rafi. 


Saturday, July 17, 2010

Sessions and Process related Views of oracle 



The important DBA queries related to sessions and processes are as follows:


We can find the active sessions in the database as follows:

There are times when you may need to issue a SHUTDOWN IMMEDIATE command on an Oracle database. Before doing so, it is critical to check which users are currently running sessions on that database; you don't want to shut down on your boss's or another important person's session. Here is a simple SQL statement to find all active sessions in your Oracle database:

SELECT SID, Serial#, UserName, Status, SchemaName, Logon_Time

FROM V$Session

WHERE Status = 'ACTIVE'

AND UserName IS NOT NULL;

How to kill all sessions connected to my Database: 


Before killing sessions, if possible do 

ALTER SYSTEM ENABLE RESTRICTED SESSION; 

to stop new sessions from connecting. 

If you want to stop new users from connecting, but allow current sessions to continue until they are inactive, you can put the database in QUIESCE mode: 

ALTER SYSTEM QUIESCE RESTRICTED; 
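
Once the maintenance is finished, both modes can be reversed (a sketch):

ALTER SYSTEM UNQUIESCE;

ALTER SYSTEM DISABLE RESTRICTED SESSION;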


SET LINESIZE 100 

COLUMN spid FORMAT A10 

COLUMN username FORMAT A10 

COLUMN program FORMAT A45 


SELECT s.inst_id, 

s.sid, 

s.serial#, 

p.spid, 

s.username, 

s.program 

FROM gv$session s 

JOIN gv$process p ON p.addr = s.paddr AND p.inst_id = s.inst_id 

WHERE s.type != 'BACKGROUND'; 



KILL ALL SESSIONS BY GENERATING ALTER SYSTEM SCRIPTS:


Step 1:Generate all the sessions to be killed as follows: 

-------- 

SQL> select 'Alter system kill session '''||sid||','||serial#||''';' 

  2  from v$session;


'ALTERSYSTEMKILLSESSION'''||SID||','||SERIAL#||''';' 

-------------------------------------------------------------------------------- 

Alter system kill session '1839,3172'; 

Alter system kill session '1842,295'; 

Alter system kill session '1844,553'; 

Alter system kill session '1845,2707'; 

Alter system kill session '1846,219'; 

Alter system kill session '1847,1555'; 

(Or) 

Note:Use immediate option with alter system to rollback transactions 

Eg: Alter system kill session '1847,1555' immediate; 





Killing inactive session as follows: 



SQL> SELECT 'ALTER SYSTEM KILL SESSION'''||SID||','||SERIAL#||''';' 

2 FROM V$SESSION WHERE STATUS='INACTIVE'; 


'ALTERSYSTEMKILLSESSION'''||SID||','||SERIAL#||''';' 

-------------------------------------------------------------------------------- 

ALTER SYSTEM KILL SESSION'1835,1618'; 

ALTER SYSTEM KILL SESSION'1842,295'; 

ALTER SYSTEM KILL SESSION'1845,2707'; 

ALTER SYSTEM KILL SESSION'1850,114'; 

ALTER SYSTEM KILL SESSION'1855,11910'; 

KILL ALL SESSIONS OF PARTICULAR USER: 

----------------------------------------------------------------------- 

begin 

for sessions in ( select sid 

, serial# 

from v$session 

where username = 'user_to_kill_sessions') 

loop 

execute immediate 'alter system kill session '''||sessions.sid||','||sessions.serial#||'''';

end loop; 

end; 


Some important session/process views (also used by tools like TOAD):


SELECT s.inst_id, 

s.sid, 

s.serial#, 

p.spid, 

s.username, 

s.program 

FROM gv$session s 

JOIN gv$process p ON p.addr = s.paddr AND p.inst_id = s.inst_id 

WHERE s.type != 'BACKGROUND'; 


select * from v$session;

select * from v$process;

select pid,username,program from v$process where username='20C1248'; 


Check, while doing migration work in Sony, which user is using which program and the session status:

------------------------------------------------------------------------------


SELECT s.sid,s.serial#,p.spid,s.username,s.osuser,s.program,s.module,s.status,s.action FROM v$process p,v$session s WHERE p.addr = s.paddr 

and machine like '%BHASKAR%'; 


SELECT s.sid,s.serial#,p.spid,s.osuser,s.program,s.module,s.status,s.action FROM v$process p,v$session s WHERE p.addr = s.paddr 

and osuser='20C1248'; 


SELECT a.username, a.osuser, b.spid 

FROM v$session a, v$process b 

WHERE a.paddr = b.addr 

AND a.username IS NOT null; 


select spid,serial# from v$process where spid='29467'; 


select 'Alter system kill session '''||sid||','||serial#||''';' 

from v$session; 


TO FIND THE BLOCKING SESSIONS (ALONG WITH SID) IN THE DATABASE AND KILL THEM:

select process,sid, blocking_session from v$session where blocking_session is not null; 



Kill oracle process in Database: 

-------------------------------------------------------- 

select process,sid, blocking_session from v$session where blocking_session is not null; 

select SERIAL# from v$session where SID=1568; 

conn /as sysdba 

alter system kill session '732,11231'; 



How to see current transaction in Database? 



SQL>select a.sid, a.username, b.xidusn, b.used_urec, b.used_ublk 

from v$session a, v$transaction b 

where a.saddr = b.ses_addr; 



How to find the job related details scheduled from Oracle Database? 


SQL> SELECT 

JOB||' '||SCHEMA_USER||' '||LAST_DATE||' '||NEXT_DATE||' '||WHAT||' '||FAILURES 

FROM DBA_JOBS; 
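
Jobs created with DBMS_SCHEDULER do not appear in DBA_JOBS; a similar sketch for those:

SQL> select owner, job_name, enabled, last_start_date, next_run_date, failure_count from dba_scheduler_jobs;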


How to check server reboot time on a Windows server:



C:\Documents and Settings\Tc62MYNODE.APAC>net statistics server 


Server Statistics for \\MYNODEETD106244 



Statistics since 10/19/2011 2:22 PM 



Sessions accepted 1 

Sessions timed-out 7 

Sessions errored-out 12 


Kilobytes sent 62885 

Kilobytes received 15769 


Mean response time (msec) 0 


System errors 0 

Permission violations 0 

Password violations 0 


Files accessed 7275 

Communication devices accessed 0 

Print jobs spooled 0 


Times buffers exhausted 


Big buffers 0 

Request buffers 0 


The command completed successfully. 



How to know the logon time and OS user details for sessions connected to the Oracle database?


Select to_char(logon_time,'dd/mm/yyyy hh24:mi:ss'),osuser,status,schemaname,machine from v$session where type !='BACKGROUND'; 
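
To also see how long each session has been connected, in minutes (a sketch):

Select osuser, schemaname, machine, to_char(logon_time,'dd/mm/yyyy hh24:mi:ss') logon_time, round((sysdate - logon_time)*24*60) minutes_connected from v$session where type !='BACKGROUND';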

