Sunday, January 20, 2013

Steps for creating Outlines


SQL> set line 200 pagesize 100 echo on
SQL> create user scott identified by tiger ;
User created.
SQL> alter user scott default tablespace users temporary tablespace temp;
User altered.
SQL> grant connect, resource, execute any procedure to scott;
Grant succeeded.
SQL> grant create any outline, drop any outline to scott;
Grant succeeded.
SQL> grant execute on dbms_outln to scott;
Grant succeeded.
SQL> grant execute on dbms_outln_edit to scott;
Grant succeeded.
SQL> grant execute_catalog_role to scott;
Grant succeeded.
To confirm, log in as scott and view the session privileges -
SQL> connect scott/tiger
Connected.
SQL> select * from session_privs
  2  /
PRIVILEGE
----------------------------------------
CREATE SESSION
UNLIMITED TABLESPACE
CREATE TABLE
CREATE CLUSTER
CREATE SEQUENCE
CREATE PROCEDURE
EXECUTE ANY PROCEDURE
CREATE TRIGGER
CREATE TYPE
CREATE OPERATOR
CREATE INDEXTYPE
11 rows selected.
---- Log in to the scott schema, create a table, insert records, and create an index
CREATE TABLE scott.emp
   (empno     NUMBER,
    ename     VARCHAR2(10),
    job       VARCHAR2(9),
    mgr       NUMBER,
    hiredate  DATE          DEFAULT SYSDATE,
    sal       NUMBER(10,2),
    comm      NUMBER(9,0)   DEFAULT NULL,
    deptno    NUMBER(2)
   )
/
Insert a few records

INSERT INTO SCOTT."EMP" (EMPNO, ENAME, JOB, HIREDATE, SAL, COMM, DEPTNO) VALUES ('10', 'John', 'MGR', TO_DATE('01-Dec-2012', 'DD-MON-RR'), '1000', '100', '40')
/
INSERT INTO SCOTT."EMP" (EMPNO, ENAME, JOB, HIREDATE, SAL, COMM, DEPTNO) VALUES ('20', 'Eurel', 'QA', TO_DATE('02-Dec-2012', 'DD-MON-RR'), '2000', '200', '40')
/
INSERT INTO SCOTT."EMP" (EMPNO, ENAME, JOB, HIREDATE, SAL, COMM, DEPTNO) VALUES ('40', 'Dave', 'Tester', TO_DATE('04-DEC-2012', 'DD-MON-RR'), '2400', '24', '30')
/
INSERT INTO SCOTT."EMP" (EMPNO, ENAME, JOB, HIREDATE, SAL, COMM, DEPTNO) VALUES ('30', 'Lela', 'QA', TO_DATE('03-DEC-2012', 'DD-MON-RR'), '3000', '300', '30')
/
INSERT INTO SCOTT."EMP" (EMPNO, ENAME, JOB, HIREDATE, SAL, COMM, DEPTNO) VALUES ('50', 'TOM', 'MGR', TO_DATE('03-DEC-2012', 'DD-MON-RR'), '2400', '24', '40')
/

create index idx_emp_empno on emp(empno) tablespace users
/
/* Create sample outline by scott user and check later execution path */
SQL> create outline empoln on select * from emp where empno = 30;
-- Optional: creating the outline also inserts corresponding plan records into the tables below in the OUTLN schema.
-- You can verify this by running a select count(*) against each of these tables, one by one:
ol$
ol$hints
ol$nodes
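-- A minimal sketch of that optional check (assumes the EMPOLN outline created above and SELECT access
-- to the OUTLN schema, e.g. when run as SYS):
SQL> select count(*) from outln.ol$      where ol_name = 'EMPOLN';
SQL> select count(*) from outln.ol$hints where ol_name = 'EMPOLN';
SQL> select count(*) from outln.ol$nodes where ol_name = 'EMPOLN';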
-- Just to verify the plan interactively with Oracle's autotrace utility
SQL> set autotrace trace explain
SQL>  select * from emp where empno = 30;
Execution Plan
----------------------------------------------------------
Plan hash value: 2941534610
---------------------------------------------------------------------------------------------
| Id  | Operation                   | Name          | Rows  | Bytes | Cost (%CPU)| Time     |
---------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |               |     1 |    87 |     2   (0)| 00:00:01 |
|   1 |  TABLE ACCESS BY INDEX ROWID| EMP           |     1 |    87 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_EMP_EMPNO |     1 |       |     1   (0)| 00:00:01 |
---------------------------------------------------------------------------------------------
Predicate Information (identified by operation id):
---------------------------------------------------
   2 - access("EMPNO"=30)
-- Actual steps start here -----------------
Run as scott (the sample schema used for testing)
SQL> alter session set optimizer_mode=rule
/
/*  SQL> ALTER SESSION SET query_rewrite_enabled=TRUE;  */
SQL> alter session set use_stored_outlines=true;
SQL> alter session set use_stored_outlines=EMPOLN;
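-- Side note (not part of the original steps): USE_STORED_OUTLINES accepts TRUE, FALSE, or a category
-- name, and an outline created without a FOR CATEGORY clause goes into the DEFAULT category. A minimal
-- sketch of using a named category instead (the outline and category names here are illustrative assumptions):
SQL> create outline empoln_cat for category emp_cat on select * from emp where empno = 30;
SQL> alter session set use_stored_outlines = emp_cat;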
   -- Flush the shared pool to clear the memory before executing this SQL, so that later we can pick the correct sql_id from the v$sql view.
   -- Execute as the sys user on another SQL prompt:
   SQL> alter system flush shared_pool;
-- Execute the statement below on the same SQL prompt as above
SQL> select * from emp where empno = 30;
-- Verify whether the outline was used by the query above
SQL> SELECT name, category, used FROM user_outlines;

SQL> /
NAME                           CATEGORY                       USED
------------------------------ ------------------------------ ------
EMPOLN                         DEFAULT                        USED
--
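-- Optional cross-check (a sketch, run as sys): V$SQL also records the outline category used by a cursor.
SQL> select sql_text, outline_category from v$sql
     where sql_text like 'select * from emp where empno = 30%';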
/* Log in as sys and execute the following to see the plan.
   Find the sql_id to see the stored plan executed in memory. */
SQL> select sql_id from v$sql where sql_text like '%select * from emp where empno = 30%'
/
-- Note the sql_id and pass it to the query below

-- Execute as user sys
select t.*
     from v$sql s
        , table(dbms_xplan.display_cursor(s.sql_id, s.child_number, 'TYPICAL ALLSTATS LAST')) t
     where s.sql_id = '&sql_id'
/
PLAN_TABLE_OUTPUT
----------------------------------------------------------------------------------------------
SQL_ID  6v6gcqm49qmvd, child number 0
-------------------------------------
select * from emp where empno = 30

Plan hash value: 2941534610

----------------------------------------------------------------------------------------------
| Id  | Operation                   | Name          | E-Rows |E-Bytes| Cost (%CPU)| E-Time   |
----------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |               |        |       |     2 (100)|          |
|   1 |  TABLE ACCESS BY INDEX ROWID| EMP           |      8 |   696 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_EMP_EMPNO |      3 |       |     1   (0)| 00:00:01 |
----------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------
   2 - access("EMPNO"=30)

Note
-----
   - outline "EMPOLN" used for this statement
   - Warning: basic plan statistics not available. These are only collected when:
       * hint 'gather_plan_statistics' is used for the statement or
       * parameter 'statistics_level' is set to 'ALL', at session or system level

26 rows selected.
--- Procedure ends here -----

--- Additional information, viewed as the outln user
Log in as outln and view the output of the query below -

select ol_name, hint#, hint_text from ol$hints
where ol_name = 'EMPOLN'
 order by ol_name, hint#
/
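-- Cleanup sketch (optional, once you are done testing):
SQL> drop outline empoln;
-- or drop every outline in a given category via the DBMS_OUTLN package:
SQL> exec dbms_outln.drop_by_cat('DEFAULT');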
Identify and Kill Specific Session in the database

Perform the steps below as the sys or system user:
  1. Find the SID and SERIAL# of the particular session from the v$session view.
  2. Use the ALTER SYSTEM KILL SESSION '<sid>,<serial#>' command to kill the session.

SQL> connect sys/change_on_install
Connected.
SQL> alter session set nls_date_format='DD-MON-YYYY:HH24:MI:SS';
Session altered.
SQL> select username, sid, serial#, terminal, program, status, logon_time
     from v$session where username = 'OPS$OPAPPS'
SQL> /
USERNAME       SID  SERIAL#  TERMINAL  PROGRAM    STATUS    LOGON_TIME
-----------  -----  -------  --------  ---------  --------  --------------------
OPS$OPAPPS      14     6114  OCDEV2               INACTIVE  29-MAR-2006:15:00:13
OPS$OPAPPS      16      929  OCDEV2    javaw.exe  INACTIVE  29-MAR-2006:14:58:28

2 rows selected.

SQL> alter system kill session '16,929';
System altered.
SQL> select username, sid, serial#, terminal, program, status, logon_time
     from v$session where username = 'OPS$OPAPPS';

USERNAME       SID  SERIAL#  TERMINAL  PROGRAM    STATUS    LOGON_TIME
-----------  -----  -------  --------  ---------  --------  --------------------
OPS$OPAPPS      14     6114  OCDEV2               INACTIVE  29-MAR-2006:15:00:13
OPS$OPAPPS      16      929  OCDEV2    javaw.exe  KILLED    29-MAR-2006:14:58:28

2 rows selected.
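-- Optional: if the session lingers with STATUS = KILLED while PMON cleans it up, the IMMEDIATE clause
-- (a sketch, reusing the sid/serial# above) rolls back its transaction and removes the session right away:
SQL> alter system kill session '16,929' immediate;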

Sunday, November 4, 2012

Hidden Oracle Parameters for Emergency Recovery




_allow_resetlogs_corruption - This parameter is the only way to open a database that was backed up while open without first placing its tablespaces in backup mode. Using it makes your system unsupported.

_corrupted_rollback_segments - This is the only way to start up a database with damaged public rollback segments. This undocumented parameter can be used without worrying about invalidating your support.

_allow_read_only_corruption - This allows the database to be opened even if it is corrupt. Use this option only to export as much information as possible from a damaged database before re-creating it. A database opened in this manner must not be used for normal operations.

_corrupt_blocks_on_stuck_recovery - This parameter can be used to start a damaged database, but doing so without Oracle's blessing is likely unsupported. In such cases, immediately export the database tables and rebuild the database.

Note: Use the above hidden parameters to recover a database only in an emergency, and only with help from Oracle technical support, as improper use of these hidden parameters can cause further data loss.
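A minimal sketch of how such a parameter would be set in a text initialization file (init<SID>.ora), strictly under Oracle Support guidance:

# emergency-only setting; remove it as soon as the data has been salvaged
_allow_resetlogs_corruption = TRUE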

Step by Step Setup of Physical Standby database on 11g

Creating Physical Standby Database

Database           DB_UNIQUE_NAME   Oracle Net Service Name
-----------------  ---------------  -----------------------
Primary            CTMSD1           CTMSD1
Physical standby   CTMSP1           CTMSP1


A)    Performing Tasks on Primary DB

a.      Enable Archiving on the Primary database

SQL> SHUTDOWN IMMEDIATE;
SQL> STARTUP MOUNT;
SQL> ALTER DATABASE ARCHIVELOG;
SQL> ALTER DATABASE OPEN;
SQL> Archive log start;
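A quick check (optional sketch) that the database is now in ARCHIVELOG mode:
SQL> archive log list
SQL> select log_mode from v$database;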

b.     Enable Forced Logging

Place the primary database in FORCE LOGGING mode after database creation using the following SQL statement:
SQL> ALTER DATABASE FORCE LOGGING;
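To confirm (optional sketch):
SQL> select force_logging from v$database;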

c.      Setup Primary DB Initialization Parameters

-----Sample Parameters (Required for Primary Database Role) -------

DB_NAME=CTMSD1
DB_UNIQUE_NAME=CTMSD1
# LOG_ARCHIVE_CONFIG='DG_CONFIG=(ctmsd1,ctmsp1)'
CONTROL_FILES='E:\oracle\CTMSD1\CONTROL01.CTL','E:\oracle\CTMSD1\CONTROL02.CTL','E:\oracle\CTMSD1\CONTROL03.CTL'
LOG_ARCHIVE_DEST_1='LOCATION=E:\archive\ctmsd1'
LOG_ARCHIVE_DEST_2='SERVICE=ctmsp1 SYNC AFFIRM'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=ENABLE
REMOTE_LOGIN_PASSWORDFILE=EXCLUSIVE
LOG_ARCHIVE_FORMAT=CDM%t_%s_%r.arc
LOG_ARCHIVE_MAX_PROCESSES=10
FAL_SERVER=ctmsp1
FAL_CLIENT=ctmsd1
DB_FILE_NAME_CONVERT='ctmsp1','ctmsd1'
LOG_FILE_NAME_CONVERT='E:\archive\ctmsp1\','E:\archive\ctmsd1\'
STANDBY_FILE_MANAGEMENT=AUTO

----- Actual parameters file on Primary ----------------------
ctmsd1.__db_cache_size=218103808
ctmsd1.__java_pool_size=4194304
ctmsd1.__large_pool_size=4194304
ctmsd1.__oracle_base='E:\Oracle'#ORACLE_BASE set from environment
ctmsd1.__pga_aggregate_target=322961408
ctmsd1.__sga_target=536870912
ctmsd1.__shared_io_pool_size=0
ctmsd1.__shared_pool_size=301989888
ctmsd1.__streams_pool_size=0
*.audit_file_dest='E:\Oracle\admin\CTMSD1\adump'
*.audit_trail='db'
*.compatible='11.1.0.0.0'
*.control_files='E:\Oracle\CTMSD1\control01.ctl','E:\Oracle\CTMSD1\control02.ctl','E:\Oracle\CTMSD1\control03.ctl'
*.db_block_size=8192
*.db_domain=''
*.db_file_name_convert='E:\oracle\CTMSP1\','E:\oracle\CTMSD1\'
*.db_name='CTMSD1'
*.db_recovery_file_dest='E:\Oracle\flash_recovery_area'
*.db_recovery_file_dest_size=2147483648
*.diagnostic_dest='E:\Oracle'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=CTMSD1XDB)'
*.fal_client='CTMSD1'
*.fal_server='CTMSP1'
*.LOG_ARCHIVE_DEST_1='LOCATION=E:\archive\ctmsd1 VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=ctmsd1'
*.LOG_ARCHIVE_DEST_2='SERVICE=ctmsp1 ASYNC VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=ctmsp1'
*.log_archive_dest_state_1='ENABLE'
*.log_archive_dest_state_2='ENABLE'
*.log_archive_format='cdm%t_%s_%r.arc'
*.log_archive_max_processes=10
*.log_file_name_convert='E:\archive\ctmsp1\','E:\archive\ctmsd1\'
*.memory_target=857735168
*.open_cursors=300
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.standby_file_management='AUTO'
*.undo_tablespace='UNDOTBS1'

d.     Create a backup copy of the database datafiles of Primary DB

A backup copy of the primary database datafiles will be used to create the standby database.
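One simple way to take that copy (a sketch, assuming a user-managed cold backup; the paths follow the convention used elsewhere in this setup):
SQL> SHUTDOWN IMMEDIATE;
-- copy the datafiles from E:\oracle\CTMSD1\ on the primary host to E:\oracle\CTMSP1\ on the standby host
SQL> STARTUP;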

e.      Create a Control File for the Standby Database

Shut down the primary database, then mount it and create the standby control file –
SQL> ALTER DATABASE CREATE STANDBY CONTROLFILE AS 'E:\Oracle\CTMSP1\ctmsp1.ctl';
SQL> ALTER DATABASE OPEN;

B)    Performing Tasks on Standby DB


a.      Prepare the initialization parameter file for the standby database

Create a text parameter file from the primary database –
SQL> CREATE PFILE='e:\initctmsp1.ora' FROM SPFILE;
 

b.     Modify initialization parameters for Standby database

------- Sample standby DB initialization Parameters ----------------
DB_NAME=ctmsd1
DB_UNIQUE_NAME=ctmsp1
# LOG_ARCHIVE_CONFIG='DG_CONFIG=(ctmsd1,ctmsp1)'
CONTROL_FILES='E:\oracle\CTMSP1\Ctmsp1.CTL'
DB_FILE_NAME_CONVERT='E:\oracle\CTMSD1\','E:\oracle\CTMSP1\'
LOG_FILE_NAME_CONVERT='E:\archive\ctmsd1\','E:\archive\ctmsp1\'
LOG_ARCHIVE_FORMAT=CDM%t_%s_%r.arc
LOG_ARCHIVE_DEST_1='LOCATION=E:\archive\ctmsp1'
LOG_ARCHIVE_DEST_2='SERVICE=ctmsd1 SYNC AFFIRM'
LOG_ARCHIVE_DEST_STATE_1=ENABLE
LOG_ARCHIVE_DEST_STATE_2=ENABLE
REMOTE_LOGIN_PASSWORDFILE=EXCLUSIVE
STANDBY_FILE_MANAGEMENT=AUTO
FAL_SERVER=ctmsd1
FAL_CLIENT=ctmsp1
---------- Actual File Standby Init<SID>.ora-----------------
ctmsp1.__db_cache_size=218103808
ctmsp1.__java_pool_size=4194304
ctmsp1.__large_pool_size=4194304
ctmsp1.__oracle_base='E:\Oracle'#ORACLE_BASE set from environment
ctmsp1.__pga_aggregate_target=322961408
ctmsp1.__sga_target=536870912
ctmsp1.__shared_io_pool_size=0
ctmsp1.__shared_pool_size=301989888
ctmsp1.__streams_pool_size=0
*.instance_name=ctmsp1
*.audit_file_dest='E:\Oracle\admin\ctmsp1\adump'
*.audit_trail='db'
*.compatible='11.1.0.0.0'
*.control_files='e:\oracle\ctmsp1\ctmsp1.ctl'
*.db_block_size=8192
*.db_unique_name=ctmsp1
*.db_domain=''
*.db_file_name_convert='E:\oracle\ctmsd1\','E:\oracle\ctmsp1\'
*.db_name='ctmsd1'
*.db_recovery_file_dest='E:\Oracle\flash_recovery_area'
*.db_recovery_file_dest_size=2147483648
*.diagnostic_dest='E:\Oracle'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=ctmsp1XDB)'
*.fal_client='CTMSP1'
*.fal_server='ctmsd1'
*.LOG_ARCHIVE_DEST_1='LOCATION=E:\archive\ctmsp1 VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=ctmsp1'
*.LOG_ARCHIVE_DEST_2='SERVICE=ctmsd1 ASYNC VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=ctmsd1'
*.log_archive_dest_state_1='ENABLE'
*.log_archive_dest_state_2='ENABLE'
*.log_archive_format='cdm%t_%s_%r.arc'
*.log_archive_max_processes=10
*.log_file_name_convert='E:\archive\ctmsd1\','E:\archive\ctmsp1\'
*.memory_target=857735168
*.open_cursors=300
*.processes=150
*.remote_login_passwordfile='EXCLUSIVE'
*.standby_file_management='AUTO'
*.undo_tablespace='UNDOTBS1'


Note: Ensure the COMPATIBLE initialization parameter is set to the same value on both the primary and standby databases.
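A quick way to compare the setting on each database (sketch):
SQL> show parameter compatible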

c.      Set up the environment to support the Standby database

a.       Create a Windows-based service using the ORADIM utility –
b.      oradim -new -sid ctmsp1 -syspwd manager -startmode manual -pfile E:\Oracle\product\11.1.0\db_1\database\initctmsp1.ora
c.       Copy the remote login password file from the primary database system to the standby database system. If this step is missed, redo transport to the standby database will fail with an authentication error.
Note: This step is optional if operating system authentication is used for administrative users and if SSL is used for redo transport authentication.
d.      Configure the listener for the primary and standby databases – use the Net Manager (netmgr) utility to configure the listener on both the primary and the standby host.
e.      To stop and start the listener –
lsnrctl stop
lsnrctl start
f.        Create Oracle Net service names (tnsnames.ora entries) for primary and standby log shipping, using Oracle Net Manager or by editing the file manually.
g.       Create an SPFILE for the standby database
SQL> CREATE SPFILE FROM PFILE='initctmsp1.ora';
h.       Create a control file for the standby database
If the backup procedure required you to shut down the primary database, issue the following SQL*Plus statement to start the primary database:
SQL> STARTUP MOUNT;
Then, create the control file for the standby database, and open the primary database to user access, as shown in the following example:
SQL> ALTER DATABASE CREATE STANDBY CONTROLFILE AS 'e:\oracle\ctmsp1\ctmsp1.ctl';
SQL> ALTER DATABASE OPEN;
 

C)      Start the Physical Standby database
a.       Start the physical Standby database
On the standby database, issue the following SQL statement to start and mount the database:
SQL> STARTUP MOUNT;
Note: The AFFIRM attribute specifies that redo received from a redo source database is not acknowledged until it has been written to the standby redo log. The NOAFFIRM attribute specifies that received redo is acknowledged without waiting for it to be written to the standby redo log.
b.      Prepare the Standby database to Receive Redo data
Copy the password file from the primary database to the standby database. This ensures that redo transport sessions for log shipping can authenticate.
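Once redo transport is configured, authentication or connectivity problems show up on the primary against the standby destination (optional sketch):
SQL> SELECT DEST_ID, STATUS, ERROR FROM V$ARCHIVE_DEST WHERE DEST_ID = 2;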
c.       Create online Redo log on the Standby Database (optional)
Although this step is optional, Oracle recommends that an online redo log be created when a standby database is created, so that the standby database is ready for a quick transition to the primary role.
d.      Start Redo Apply
On the standby database, issue the following command to start Redo Apply:
SQL> alter database mount standby database;
Note: Copy the primary archived logfiles to 'F:\oracle\admin\STDHCL\Archive'
SQL> RECOVER STANDBY DATABASE;
Now, of course, you will want to test your standby database. You do this by opening it in read-only mode:
SQL> alter database open read only;
SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT FROM SESSION;
The statement includes the DISCONNECT FROM SESSION option so that Redo Apply runs in a background session.
D)      Verify the Physical Standby Database Is Performing Properly
a.       Identify the existing archived redo log files.
On the standby database, query the V$ARCHIVED_LOG view to identify existing files in the archived redo log. For example:
SQL> SELECT SEQUENCE#, FIRST_TIME, NEXT_TIME
          FROM V$ARCHIVED_LOG ORDER BY SEQUENCE#;
b.       Force a log switch to archive the current online redo log file
On the primary database, issue the ALTER SYSTEM SWITCH LOGFILE statement to force a log switch and archive the current online redo log file group:
SQL> ALTER SYSTEM SWITCH LOGFILE;
c.       Verify the new redo data was archived on the standby database.
On the standby database, query the V$ARCHIVED_LOG view to verify the redo data was received and archived on the standby database:
SQL> SELECT SEQUENCE#, FIRST_TIME, NEXT_TIME
          FROM V$ARCHIVED_LOG ORDER BY SEQUENCE#;
 
d.      Verify that received redo has been applied.
On the standby database, query the V$ARCHIVED_LOG view to verify that received redo has been applied:
SQL> SELECT SEQUENCE#, APPLIED FROM V$ARCHIVED_LOG
          ORDER BY SEQUENCE#;
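Another quick health check on the standby (optional sketch):
SQL> SELECT NAME, VALUE FROM V$DATAGUARD_STATS
          WHERE NAME IN ('transport lag', 'apply lag');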
 

Starting Redo Apply

                                                                                           
SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE;
 
OR
To start real-time apply, include the USING CURRENT LOGFILE clause on the SQL statement. For example:
SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE;
 

Stopping Redo Apply

To stop Redo Apply, issue the following SQL statement in another window:
SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
 
To Check the archived files of the Primary database
SQL> select sequence# from v$archived_log where dest_id=1 order by sequence#;
To Check the Applied archive files of the Standby database
SQL> select sequence#, applied from v$archived_log order by sequence#;
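You can also watch the redo transport (RFS) and apply (MRP0) processes on the standby (optional sketch):
SQL> select process, status, thread#, sequence# from v$managed_standby;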

On either database, you can query the V$LOGFILE view and obtain a list of the SRL files that have been created:

SQL> SELECT GROUP#, MEMBER FROM V$LOGFILE WHERE TYPE='STANDBY';

SQL> ALTER DATABASE DROP STANDBY LOGFILE MEMBER '../..';
SQL> SELECT DESTINATION FROM V$ARCHIVE_DEST WHERE DEST_ID=1;    -- On the primary, this is the local (primary) archive destination
SQL> SELECT DESTINATION FROM V$ARCHIVE_DEST WHERE DEST_ID=2;    -- On the primary, this is the standby (remote) archive destination
Assuming we have three ORL groups of 50MB each, we will create four SRL groups on the primary database:

SQL> ALTER DATABASE ADD STANDBY LOGFILE '+FLASH' SIZE 50M;
SQL> ALTER DATABASE ADD STANDBY LOGFILE '+FLASH' SIZE 50M;
SQL> ALTER DATABASE ADD STANDBY LOGFILE '+FLASH' SIZE 50M;
SQL> ALTER DATABASE ADD STANDBY LOGFILE '+FLASH' SIZE 50M;
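To confirm the standby redo log groups and their sizes afterwards (a sketch):
SQL> SELECT GROUP#, THREAD#, BYTES/1024/1024 MB, STATUS FROM V$STANDBY_LOG;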

Free DB space Report of database

Free DB space Report of database (tested on 11g)


Set head off echo off

select 'Report generated at', to_char(systimestamp, 'HH24:MI:SS "on" Mon DD, YYYY ( ')
                        || trim(to_char(systimestamp, 'Day'))
                        || to_char(systimestamp, ' ) "in Timezone" TZR')
from dual;



set pagesize 1000
set line 200
set trims on
col tablespace_name format a25            heading "Tabsp Name"
col file_name       format a45            heading "File Name"
col total_size      format 999,999.00     heading "Size MB"
col free_space      format 999,999.00     heading "Free MB"
col pct_used        format 999.00         heading "%|Used"
col max_size        format 99999.00        heading "Max Size (MB)"

clear breaks

Set head on echo off

select df.tablespace_name
,      df.file_name
,      df.bytes/1024/1024                        total_size
,      nvl(fr.bytes/1024/1024,0)                 free_space
,      ((df.bytes-nvl(fr.bytes,0))/df.bytes)*100 pct_used
, df.maxbytes/1024/1024                          Max_Size
from   (select sum(bytes) bytes
        ,      file_id
        from   dba_free_space
        group by file_id)     fr
,       dba_data_files        df
where df.file_id = fr.file_id(+)
order by 1, df.file_id
/
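To capture the report output to a file, the script can be wrapped in a spool (a sketch; the file and script names are assumptions):
SQL> spool free_space_report.txt
SQL> @free_space_report.sql
SQL> spool off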


/* aw_tablespaces.sql
This script generates a report about the tablespaces in the database
and includes information about analytic workspaces. The following is an
explanation of the output columns.

 Tablespace = name of tablespace
 TYPE       = U:UNDO T:TEMPORARY P:PERMANENT
 AUTO       = Is the tablespace auto-extensible Y or N
 SEGM       = Is the tablespace SEGMENT SPACE MANAGEMENT A:AUTO M:MANUAL
 PLUG       = Is the tablespace PLUGGED IN Y:YES or N:NO
 LOGG       = Is the tablespace LOGGING Y:LOGGING or N:NOLOGGING
 LIVE       = Status of the tablespace Y:ONLINE N:OFFLINE
 Users      = How many users own objects in the tablespace
 Size(MB)   = Total size of the tablespace
 Used(MB)   = Total space occupied by objects in the tablespace
 AW(MB)     = How much space is consumed by analytic workspaces in the tablespace
 AW#        = Count of analytic workspaces in the tablespace
 AWPts      = Count of partitions for analytic workspaces in the tablespace   
*/


set lines 150 pages 500 feedback off head on
clear bre col comp buff
col name        for a18         hea "Tablespace"
col ownr        for 990         hea "Users"
col ttype       for a1          hea "T|Y|P|E"
col auto        for a1          hea "A|U|T|O"
col segm        for a1          hea "S|E|G|M"
col plg         for a1          hea "P|L|U|G"
col lgg         for a1          hea "L|O|G|G"
col status      for a1          hea "L|I|V|E"
col sz          for 999,990.9   hea "Size(MB)"
col usd         for 999,990.9   hea "Used(MB)"
col awsz        for 999,990.9   hea "AW(MB)"
col aws         for 990         hea "AW#"
col segs        for 990         hea "AWPts"

bre on REPORT;

comp sum lab total of aws  on REPORT;
comp sum lab total of awsz on REPORT;
comp sum lab total of fr   on REPORT;
comp sum lab total of segs on REPORT;
comp sum lab total of sz   on REPORT;
comp sum lab total of usd  on REPORT;

-- Uncomment the next line to create a view definition (optional)
-- create or replace view aw_storage as

SELECT d.tablespace_name name, substr(d.contents, 1, 1) ttype,
  substr(a.autoextensible, 1, 1) auto, substr(d.segment_space_management, 1, 1) segm,
  substr(d.plugged_in, 1, 1) plg, decode(d.logging,'LOGGING','Y','NOLOGGING','N','?') lgg,
  decode(d.status, 'ONLINE', 'Y', 'OFFLINE', 'N', '?') status,
  NVL(o.ownr,0) ownr, NVL(a.bytes/1024/1024,0) sz,
  ((NVL(a.bytes/1024/1024,0))-(NVL(NVL(f.bytes,0),0)/1024/1024)) usd,
  NVL(g.bytes/1024/1024,0) awsz, NVL(g.awcnt,0) aws, NVL(g.segcnt,0) segs
FROM   sys.dba_tablespaces d,
  (select tablespace_name, autoextensible, sum(bytes) bytes
   from dba_data_files group by tablespace_name, autoextensible) a,
  (select tablespace_name, sum(bytes) bytes from dba_free_space group by tablespace_name) f,
  (select dbas.tablespace_name, count(distinct table_name) as awcnt,
   count(*) as segcnt,  sum(dbas.bytes) bytes from dba_lobs dbal, dba_segments dbas
   where dbal.column_name = 'AWLOB' and dbal.segment_name = dbas.segment_name
   group by dbas.tablespace_name) g,
  (select tablespace_name, count(distinct owner) ownr
   from dba_segments group by tablespace_name) o
WHERE  d.tablespace_name = a.tablespace_name(+) AND d.tablespace_name = f.tablespace_name(+)
AND    d.tablespace_name = g.tablespace_name(+) AND d.tablespace_name = o.tablespace_name(+)
AND    NOT (d.extent_management = 'LOCAL' AND d.contents = 'TEMPORARY')
UNION ALL
SELECT d.tablespace_name name, substr(d.contents, 1, 1) ttype,
  substr(a.autoextensible, 1, 1) auto, substr(d.segment_space_management, 1, 1) segm,
  substr(d.plugged_in, 1, 1) plg, decode(d.logging,'LOGGING','Y','NOLOGGING','N','?') lgg,
  decode(d.status, 'ONLINE', 'Y', 'OFFLINE', 'N', '?') status,
  NVL(o.ownr, 0) ownr, NVL(a.bytes /1024/1024, 0) sz,
  ((NVL(a.bytes/1024/1024,0))-(NVL((a.bytes-t.bytes), a.bytes)/1024/1024)) usd,
  NVL(g.bytes/1024/1024,0) awsz, NVL(g.awcnt,0) aws, NVL(g.segcnt,0) segs
FROM   sys.dba_tablespaces d,
  (select tablespace_name, autoextensible, sum(bytes) bytes
   from dba_temp_files group by tablespace_name, autoextensible) a,
  (select tablespace_name, sum(bytes_cached) bytes
   from gv$temp_extent_pool group by tablespace_name) t,
  (select dbas.tablespace_name, count(distinct table_name) as awcnt,
   count(*) as segcnt, sum(dbas.bytes) bytes from dba_lobs dbal, dba_segments dbas
   where dbal.column_name = 'AWLOB' and dbal.segment_name = dbas.segment_name
   group by dbas.tablespace_name) g,
  (select tablespace_name, count(distinct owner) ownr
  from dba_segments group by tablespace_name) o
WHERE  d.tablespace_name = a.tablespace_name(+) AND d.tablespace_name = t.tablespace_name(+)
AND    d.tablespace_name = g.tablespace_name(+) AND d.tablespace_name = o.tablespace_name(+)
AND    d.extent_management = 'LOCAL' AND d.contents = 'TEMPORARY'
ORDER BY ttype, name
/