Skip to main content

Oracle Daily SQL

Enable System-wide tracing:
alter system set events '10046 trace name context forever,level 12';
Disable tracing in all sessions:
alter system set events '10046 trace name context off';
Session Tracing:
-- Per-session 10046 trace setup.
-- Tag the trace file name so it is easy to find in the trace directory.
alter session set tracefile_identifier='10046';
-- Record timing data in the trace file.
alter session set timed_statistics = true;
alter session set statistics_level=all;
-- Do not let the trace file be truncated mid-capture.
alter session set max_dump_file_size = unlimited;
-- Level 12 = SQL trace plus bind variables and wait events.
alter session set events '10046 trace name context forever,level 12';
crsctl for Oracle RAC
./crsctl start cluster
./crsctl check crs
Make column to CLOB
-- Two-step conversion: a VARCHAR2 column cannot be modified straight to CLOB;
-- convert to LONG first, then LONG -> CLOB (a supported conversion).
ALTER TABLE TABLE_NAME MODIFY (COLUMN_NAME LONG);
ALTER TABLE TABLE_NAME MODIFY (COLUMN_NAME CLOB);
-- Rename a table in the current schema.
RENAME <TABLE_NAME> TO <NEW_TABLE_NAME>;
Rename column, change data type, drop column:
-- Rename a column (fixed: 'C:OLUMN_NAME' typo made the statement invalid).
ALTER TABLE TABLE_NAME RENAME COLUMN COLUMN_NAME TO COLUMN_NAME1;
ALTER TABLE TABLE_NAME MODIFY (COLUMN_NAME VARCHAR2(10));
ALTER TABLE TABLE_NAME DROP COLUMN COLUMN_NAME;
Create Primary Key:
ALTER TABLE TABLE_NAME ADD CONSTRAINT CONSTRAINT_NAME PRIMARY KEY (COLUMN1,COLUMN2,COLUMN3,COLUMN4) USING INDEX;
Create Unique Index, Rename Index:
CREATE UNIQUE INDEX INDEX_NAME ON TABLE_NAME (COLUMN1,COLUMN2,COLUMN3,COLUMN4);
ALTER INDEX <INDEX_NAME> RENAME TO <NEW_INDEX>;
Privilege and Role management:
SELECT * FROM ROLE_SYS_PRIVS;
Create synonym:
-- Create a synonym (fixed: missing space before FOR made the statement invalid).
create synonym <SYNONYM_NAME> for <SCHEMA_NAME>.<TABLE_NAME>;
Add Data file:
select FILE_NAME || '    ' || TABLESPACE_NAME from dba_temp_files;

ALTER TABLESPACE <TABLESPACE_NAME> ADD DATAFILE '/u01/data_file/<file_name>.dbf' SIZE 5G AUTOEXTEND ON;

Find out Tablespace size
-- Tablespace size vs. free space (MB). Temp tablespaces have no rows in
-- dba_free_space, so the free-space side is outer-joined (FreeMb is NULL for them).
select b.tablespace_name, tbs_size SizeMb, a.free_space FreeMb
from (select tablespace_name,
             round(sum(bytes)/1024/1024, 2) as free_space
      from dba_free_space
      group by tablespace_name) a,
     (select tablespace_name,
             sum(bytes)/1024/1024 as tbs_size
      from dba_data_files
      group by tablespace_name
      -- UNION ALL: a tablespace is either permanent or temporary, the two
      -- branches are disjoint, so the dedup sort that UNION implies is wasted work.
      union all
      select tablespace_name,
             sum(bytes)/1024/1024 as tbs_size
      from dba_temp_files
      group by tablespace_name) b
where a.tablespace_name(+) = b.tablespace_name;

Generate script for permission for TABLE in schema:
SELECT 'GRANT SELECT,INSERT,UPDATE,DELETE ON <SCHEMA_NAME>.' || OBJECT_NAME || ' ' ||  'TO <USER_NAME>;' FROM USER_OBJECTS WHERE OBJECT_TYPE='TABLE' ORDER BY OBJECT_NAME;

Explain plan for sql_id:
-- Pull the execution plan for a given SQL_ID from AWR history into a log file.
set pagesize 0
set linesize 5000
spool plan_q1.log
-- Replace 'sql_id' with the actual SQL_ID literal.
select * from table(dbms_xplan.display_awr('sql_id'));
spool off
Kill session in RAC environment
ALTER SYSTEM KILL SESSION '<SID>,<SERIAL#>,@<INSTANCE_NO>' IMMEDIATE;
List compression for the table partition
SET PAGESIZE 0
COLUMN TABLE_NAME FORMAT A30
COLUMN COMPRESSION FORMAT A30
SELECT DISTINCT TABLE_NAME, COMPRESSION FROM DBA_TAB_PARTITIONS WHERE COMPRESSION IN ('DISABLED') AND TABLE_OWNER='<TABLE_OWNER>';
SELECT DISTINCT TABLE_NAME, COMPRESSION FROM DBA_TAB_PARTITIONS WHERE COMPRESSION IN ('ENABLED') AND TABLE_OWNER='<TABLE_OWNER>';


List partitioned indexes
SET PAGESIZE 0
COLUMN INDEX_NAME FORMAT A30
COLUMN PARTITION_NAME FORMAT A30
SELECT INDEX_NAME, PARTITION_NAME FROM DBA_IND_PARTITIONS WHERE INDEX_OWNER='<TABLE_OWNER>' ORDER BY INDEX_NAME;

List index name partitioning type for tables:
SET PAGESIZE 0
SET LINESIZE 1000
COLUMN TABLE_NAME FORMAT A30
COLUMN INDEX_NAME FORMAT A30
COLUMN PARTITIONING_TYPE FORMAT A30
SELECT TABLE_NAME, INDEX_NAME, PARTITIONING_TYPE FROM ALL_PART_INDEXES WHERE OWNER='<TABLE_OWNER>';

-- List compressed table partitions with their compression type.
SET PAGESIZE 0
SET LINESIZE 1000
COLUMN TABLE_NAME FORMAT A50
-- Fixed: column name was misspelled 'COMPRESSOIN', so the format never applied.
COLUMN COMPRESSION FORMAT A20
COLUMN COMPRESS_FOR FORMAT A10

SELECT DISTINCT TABLE_NAME, COMPRESSION, COMPRESS_FOR
FROM DBA_TAB_PARTITIONS
WHERE TABLE_OWNER='<TABLE_OWNER>' AND COMPRESSION='ENABLED';

List all tables and degree of parallelism:
set pagesize 0
spool table_degree.txt
COLUMN TABLE_NAME FORMAT A30
COLUMN DEGREE FORMAT A10
SELECT TABLE_NAME, DEGREE

FROM DBA_TABLES
WHERE TABLE_NAME IN (SELECT OBJECT_NAME FROM DBA_OBJECTS WHERE OBJECT_TYPE='TABLE' AND OWNER='<TABLE_OWNER>') ORDER BY DEGREE DESC;
spool off


List all indexes and degree of parallel:
set pagesize 0
spool index_degree.txt
COLUMN INDEX_NAME FORMAT A30
COLUMN DEGREE FORMAT A10
SELECT INDEX_NAME, DEGREE FROM DBA_INDEXES WHERE INDEX_NAME IN (SELECT OBJECT_NAME FROM DBA_OBJECTS WHERE OBJECT_TYPE='INDEX' AND OWNER='<TABLE_OWNER>') ORDER BY DEGREE DESC;
spool off

 
gen_datafile.sql
-- gen_datafile.sql: generate RMAN "SET NEWNAME FOR DATAFILE" commands for every
-- datafile, relocating each under /u02/oradata/&1/ (&1 = target DB name).
-- Fixed: 'pages', 'echo', 'verify' and 'feed' are SET system variables and are
-- not valid SQL*Plus commands on their own.
set head off
set pages 0
set echo off
set verify off
set feed off
set linesize 500
spool ren_datafiles.lst
select 'SET NEWNAME FOR DATAFILE ' || FILE# || ' TO ''' || '/u02/oradata/&1/' || substr(name,instr(name,'/',-1)+1) || ''';'
from v$datafile;
spool off
exit;

gen_logfiles.sql
-- gen_logfiles.sql: generate RMAN "SQL ALTER DATABASE RENAME FILE" commands for
-- every redo log member (rename-in-place template; edit the target path by hand).
-- Fixed: added the missing 'set' prefixes and closed the spool before exiting.
set head off
set pages 0
set echo off
set feed off
spool re_logfiles.lst
SELECT 'SQL "ALTER DATABASE RENAME FILE '''''||  MEMBER ||'''''' ||chr(10)||'to ''''' || member || '''''" ;'
FROM V$LOGFILE;
spool off
exit

Check whether cell smart scan is enabled for an ASM disk group:
 Make sure to connect to the ASM instance; if you connect to a DB instance you won't find the value.
SELECT dg.name AS diskgroup, SUBSTR(a.name,1,24) AS name, SUBSTR(a.value,1,24) AS value
FROM V$ASM_DISKGROUP dg,
V$ASM_ATTRIBUTE a
WHERE dg.group_number = a.group_number and a.NAME = 'cell.smart_scan_capable';

Monitor Lock for Oracle:

select * from v$lock where block=1 ;

If a session holds a lock that's blocking another session, BLOCK=1.
-- Sessions waiting on a lock they cannot get (the blocked side).
select * from v$lock where REQUEST > 0;

The blocked session will have the same values in ID1 and ID2 as the blocking session, and, since it is requesting a lock it's unable to get, it will have REQUEST > 0.

-- Blocker/blockee SID pairs: the waiter shares the holder's lock identifiers
-- (ID1/ID2); the holder has BLOCK=1, the waiter has REQUEST>0.
select l1.sid, ' IS BLOCKING ', l2.sid
from v$lock l1, v$lock l2
where l1.block = 1
  and l2.request > 0
  and l1.id1 = l2.id1
  and l1.id2 = l2.id2;

-- Human-readable blocking report: user@machine (SID) of blocker and blocked.
-- BUG FIX: the last predicate was 'l2.id2 = l2.id2' — a tautology that is always
-- true, so unrelated lock pairs could be reported. It must compare l1 to l2.
select s1.username || '@' || s1.machine || ' ( SID=' || s1.sid || ' )  is blocking ' || s2.username || '@' || s2.machine || ' ( SID=' || s2.sid || ' ) ' AS blocking_status
from v$lock l1, v$session s1, v$lock l2, v$session s2
where s1.sid = l1.sid
  and s2.sid = l2.sid
  and l1.BLOCK = 1
  and l2.request > 0
  and l1.id1 = l2.id1
  and l1.id2 = l2.id2;


-- Blocker/blockee pairs with usernames resolved via scalar subqueries against
-- v$session. Matching rule: the waiter (request > 0) carries the same lock
-- identifiers id1/id2 as the holder (block = 1).
select  (select username from v$session where sid=a.sid) blocker, a.sid, ' is blocking ',
   (select username from v$session where sid=b.sid) blockee, b.sid
from
   v$lock a,
   v$lock b
where
   a.block = 1 and
   b.request > 0 and
   a.id1 = b.id1 and
   a.id2 = b.id2;


Select SQL_TEXT from DBA_HIST_SQLTEXT

SET PAGESIZE 0
set long 2000000000
SPOOL sqltext.txt
SELECT SQL_TEXT FROM DBA_HIST_SQLTEXT;
spool off

 
table last time analyzed:
-- When were a table's optimizer statistics last gathered (added missing terminator).
select owner, table_name, last_analyzed, global_stats
from dba_tables
where table_name in ('<table_name>')
  and owner = '<owner_name>';

partition last time analyzed:
-- When was each table partition last analyzed (added missing terminator).
select table_owner, table_name, partition_name, last_analyzed, global_stats
from dba_tab_partitions
where table_owner in ('<owner_name>')
  and table_name in ('<table_name>')
order by table_owner, table_name, partition_name;

index last time analyzed:
-- When was each index last analyzed.
-- Fixed: 'index_name' was a bare literal; every sibling query uses a <placeholder>.
-- Also added the missing statement terminator.
select owner, index_name, last_analyzed, global_stats
from dba_indexes
where owner in ('<owner_name>')
  and index_name in ('<index_name>')
order by owner, index_name;

partitioned index last time analyzed:
-- When was each index partition last analyzed (added missing terminator).
select index_owner, index_name, partition_name, last_analyzed, global_stats
from dba_ind_partitions
where index_owner in ('<owner_name>')
  and index_name in ('<index_name>')
order by index_owner, index_name, partition_name;
 
DML against table:
SET LINESIZE 500
SET PAGESIZE 0
COLUMN TABLE_NAME FORMAT A30
SELECT TABLE_NAME, INSERTS, UPDATES,DELETES,TRUNCATED, TIMESTAMP FROM ALL_TAB_MODIFICATIONS WHERE TABLE_OWNER IN ('<TABLE_OWNER>') ORDER BY TIMESTAMP DESC ;


Check incremental statistics is implemented or not:
SELECT dbms_stats.get_prefs('INCREMENTAL','<schema_name>','<table_name>') "INCREMENTAL" FROM   dual;
Enable incremental statistics:
exec dbms_stats.set_table_prefs('<schema_name>','<table_name>','INCREMENTAL','TRUE');
Implement incremental statistics at partition level:
exec dbms_stats.gather_table_stats('<schema_name>','<table_name>',partname=>'<partition_name>',ESTIMATE_PERCENT=>DBMS_STATS.AUTO_SAMPLE_SIZE, granularity=>'PARTITION');


Granularity of statistics to collect (only pertinent if the table is partitioned).
DEFAULT: Gather global- and partition-level statistics.
SUBPARTITION: Gather subpartition-level statistics.
PARTITION: Gather partition-level statistics.
GLOBAL: Gather global statistics.
ALL: Gather all (subpartition, partition, and global) statistics.


List under score parameter which has TRUE value:
column "hidden parameter" format A50
column "value" format A20
set pagesize 0
select ksppinm as "hidden parameter", ksppstvl as "value"  from x$ksppi join x$ksppcv using (indx) where ksppstvl='TRUE';


List specific under score parameter which has TRUE value:
column "hidden parameter" format A50
column "value" format A20
-- Look up one specific underscore parameter.
-- Fixed: LIKE '_db_fast_obj_ckpt' treats the leading '_' as a single-character
-- wildcard; an exact equality match is what is intended here.
select ksppinm as "hidden parameter", ksppstvl as "value"
from x$ksppi join x$ksppcv using (indx)
where ksppinm = '_db_fast_obj_ckpt'
order by ksppinm;


Ref.:

https://www.toadworld.com/platforms/oracle/w/wiki/11434.sophisticated-incremental-statistics-gathering-feature-in-12c
http://www.oracle.com/technetwork/database/bi-datawarehousing/twp-optimizer-with-oracledb-12c-1963236.pdf
Explore scripts at Guy Harrison - http://guyharrison.squarespace.com/opsgsamples/
 

Comments

Popular posts from this blog

Oracle Pluggable Database

Pluggable database administration:-

Open all pluggable databases:
Connect to container database using oraenv
$ . oraenv   (enter DB_Name at the ORACLE_SID prompt)
$sqlplus "/as sysdba"
SQL> alter pluggable database all open;
Get pluggable database status:
SQL> select con_id, dbid, name, open_mode from v$pdbs;
Switch to Pluggable database from container :
SQL> alter session set container = <Pluggable_DB>;
Get status of Pluggable database
SELECT v.name, v.open_mode, NVL(v.restricted, 'n/a') "RESTRICTED", d.status
FROM v$pdbs v, dba_pdbs d WHERE v.guid = d.guid ORDER BY v.create_scn;
Get file location for pluggable database:
SQL>select file_name from dba_data_files;
Create tablespace for pluggable database:
SQL> alter session set container=<Pluggable_DB>;
Session altered.
SQL> CREATE TABLESPACE TB_Name LOGGING DATAFILE '/<Path>/<File_Name>.dbf' SIZE 1024M REUSE AUTOEXTEND ON NEXT 8192K MAXSIZE UNLIMITED EXTENT MANAGEMENT LOCAL UNIFORM SIZE 1M;
Set new t…

Oracle Golden Gate Basic Classic

Create environment
01. Source & target - Create GG tablespace.
02. Source & target - Create the GoldenGate Schema Owner
03. Grant privilege including DBA to GG schema owner
04. Add schema owner to global parameter file ./GLOBALS
05. Execute role set up script to create role GGS_GGSUSER_ROLE
06. Grant role GGS_GGSUSER_ROLE to GG user at both Source and target
Configure GG  Extract
01. Source and Target - Configure Manager Parameters
$ ggsci
GGSCI 1> EDIT PARAMS MGR
PORT 7809
DYNAMICPORTLIST 7810-7820
02. Source, create parameter file for Extract - ex1,
EXTRACT ex1
USERID <id>, PASSWORD <pwd>
EXTTRAIL /home/oracle/goldengate/dirdat/ex
TABLE <Schema>.*;
03. Source, configure Data Pump Parameters
EXTRACT dp1
USERID <id>, PASSWORD <pwd>
RMTHOST <hostname>, MGRPORT 7809
RMTTRAIL /home/oracle/goldengate/dirdat/rt
TABLE <schema>.*;
Target - Create check point table
$ ggsci
GGSCI 1> DBLOGIN USERID <id>, PASSWORD <pwd>
GGSCI 2>…