Skip to content

Oracle Performance Tips

This guide covers Oracle PL/SQL and SQL performance optimization techniques, best practices, and tools for writing efficient database code.

🎯 Core Performance Principles

Oracle Performance Characteristics

  • Database Engine: Optimized for set-based operations
  • PL/SQL Engine: Procedural language with SQL integration
  • Cost-Based Optimizer: chooses execution plans using table/index statistics and cost estimates
  • Multi-version Concurrency: MVCC for consistent reads

Measurement First

  • Execution Plans: Analyze query execution paths
  • Statistics: Maintain accurate table/index statistics
  • AWR Reports: Automatic Workload Repository analysis
  • SQL Trace: Detailed execution tracing

🚀 SQL Optimization

Query Writing Best Practices

-- Bad: Cartesian product (comma join with no join condition)
SELECT e.name, d.department_name
FROM employees e, departments d;

-- Good: Proper join conditions
SELECT e.name, d.department_name
FROM employees e
JOIN departments d ON e.department_id = d.department_id;

-- Bad: Selecting unnecessary columns
SELECT * FROM employees WHERE salary > 50000;

-- Good: Select only needed columns
SELECT employee_id, name, salary 
FROM employees 
WHERE salary > 50000;

-- Bad: Function on an indexed column defeats a plain index on name
SELECT employee_id, name FROM employees WHERE UPPER(name) = 'JOHN';

-- Good (option 1): avoid the function when an exact-case match is acceptable
SELECT employee_id, name FROM employees WHERE name = 'John';

-- Good (option 2): keep the function, but back it with a function-based
-- index (CREATE INDEX ... ON employees(UPPER(name)) — see next section);
-- the predicate UPPER(name) = 'JOHN' can then use that index.
SELECT employee_id, name FROM employees WHERE UPPER(name) = 'JOHN';

Index Usage Optimization

-- Create appropriate indexes
CREATE INDEX idx_employees_salary ON employees(salary);
CREATE INDEX idx_employees_dept_salary ON employees(department_id, salary);

-- Function-based index for case-insensitive search
CREATE INDEX idx_employees_name_upper ON employees(UPPER(name));

-- Bitmap indexes for low-cardinality columns
-- (best on read-mostly / warehouse tables: bitmap indexes serialize
-- concurrent DML on overlapping key ranges)
CREATE BITMAP INDEX idx_employees_gender ON employees(gender);

-- Composite index for multiple column queries
CREATE INDEX idx_employees_dept_status ON employees(department_id, status);

-- Enable usage monitoring first — V$OBJECT_USAGE reports nothing for an
-- index that was never put into MONITORING USAGE mode
ALTER INDEX idx_employees_salary MONITORING USAGE;

-- Monitor index usage (the view exposes index_name, not object_name;
-- on 12c+ prefer USER_OBJECT_USAGE / DBA_OBJECT_USAGE)
SELECT index_name, table_name, monitoring, used
FROM v$object_usage
WHERE index_name LIKE 'IDX_%';

Subquery Optimization

-- Bad: Correlated subquery — the inner AVG is re-evaluated per outer row
SELECT e.name
FROM employees e
WHERE e.salary > (
    SELECT AVG(salary)
    FROM employees e2
    WHERE e2.department_id = e.department_id
);

-- Good: compute each department's average once, then join against it
SELECT e.name
FROM employees e
JOIN (
    SELECT department_id, AVG(salary) AS avg_salary
    FROM employees
    GROUP BY department_id
) avg_by_dept
    ON avg_by_dept.department_id = e.department_id
WHERE e.salary > avg_by_dept.avg_salary;

-- Good: EXISTS can stop at the first matching department row,
-- and is NULL-safe where IN (subquery) is not
SELECT e.name
FROM employees e
WHERE EXISTS (
    SELECT 1
    FROM departments d
    WHERE d.department_id = e.department_id
      AND d.location = 'New York'
);

📊 PL/SQL Optimization

Bulk Operations

-- Bad: Row-by-row processing
-- Pedagogical anti-pattern, kept slow on purpose for contrast with
-- update_salaries_fast below: each iteration issues a separate UPDATE,
-- costing one SQL<->PL/SQL context switch per row.
CREATE OR REPLACE PROCEDURE update_salaries_slow AS
    -- Cursor over every employee row
    CURSOR emp_cursor IS
        SELECT employee_id, salary FROM employees;
BEGIN
    -- One single-row UPDATE per fetched row: O(rows) engine round trips
    FOR emp_rec IN emp_cursor LOOP
        UPDATE employees 
        SET salary = salary * 1.1 
        WHERE employee_id = emp_rec.employee_id;
    END LOOP;
END;
/

-- Good: BULK COLLECT and FORALL
-- Two context switches per batch (one bulk fetch, one bulk-bound UPDATE)
-- instead of one per row. The LIMIT clause bounds PGA memory use — an
-- unbounded BULK COLLECT of a large table can exhaust session memory.
CREATE OR REPLACE PROCEDURE update_salaries_fast AS
    TYPE emp_id_table IS TABLE OF employees.employee_id%TYPE INDEX BY PLS_INTEGER;
    TYPE salary_table IS TABLE OF employees.salary%TYPE INDEX BY PLS_INTEGER;

    v_emp_ids  emp_id_table;
    v_salaries salary_table;

    -- Never hold more than this many rows in the collections at once
    c_batch_size CONSTANT PLS_INTEGER := 1000;

    CURSOR emp_cursor IS
        SELECT employee_id, salary FROM employees;
BEGIN
    OPEN emp_cursor;
    LOOP
        -- Collect one bounded batch
        FETCH emp_cursor BULK COLLECT INTO v_emp_ids, v_salaries
            LIMIT c_batch_size;
        EXIT WHEN v_emp_ids.COUNT = 0;

        -- Bind the whole batch into a single UPDATE execution
        FORALL i IN 1..v_emp_ids.COUNT
            UPDATE employees
            SET salary = v_salaries(i) * 1.1
            WHERE employee_id = v_emp_ids(i);
    END LOOP;
    CLOSE emp_cursor;

    -- Commit once, after the cursor is closed
    COMMIT;
END;
/

Cursor Optimization

-- Bad: Implicit cursor (less control)
-- Pedagogical anti-pattern: SELECT * fetches every column whether needed
-- or not, and the cursor-FOR loop gives no control over fetch size.
CREATE OR REPLACE PROCEDURE process_employees_bad AS
BEGIN
    FOR emp_rec IN (SELECT * FROM employees) LOOP
        -- Process employee (placeholder body)
        NULL;
    END LOOP;
END;
/

-- Good: Explicit cursor with proper handling
CREATE OR REPLACE PROCEDURE process_employees_good AS
    CURSOR emp_cursor IS
        SELECT employee_id, name, salary 
        FROM employees 
        WHERE department_id = 10;

    v_emp_rec emp_cursor%ROWTYPE;
BEGIN
    OPEN emp_cursor;
    LOOP
        FETCH emp_cursor INTO v_emp_rec;
        EXIT WHEN emp_cursor%NOTFOUND;

        -- Process employee
        DBMS_OUTPUT.PUT_LINE('Processing: ' || v_emp_rec.name);
    END LOOP;

    CLOSE emp_cursor;

    -- Commit once, after the cursor is closed. Committing inside the fetch
    -- loop ("fetch across commit") releases the undo the open cursor still
    -- needs for read consistency and risks ORA-01555 (snapshot too old);
    -- it also makes the unit of work non-atomic.
    COMMIT;
END;
/

-- Better: Use BULK COLLECT with LIMIT
CREATE OR REPLACE PROCEDURE process_employees_bulk AS
    CURSOR emp_cursor IS
        SELECT employee_id, name, salary 
        FROM employees 
        WHERE department_id = 10;

    TYPE emp_table_type IS TABLE OF emp_cursor%ROWTYPE;
    v_employees emp_table_type;

    -- Rows fetched per round trip; bounds collection memory
    v_batch_size CONSTANT PLS_INTEGER := 1000;
BEGIN
    OPEN emp_cursor;
    LOOP
        FETCH emp_cursor BULK COLLECT INTO v_employees LIMIT v_batch_size;
        -- Test COUNT = 0, not %NOTFOUND: %NOTFOUND becomes TRUE on the
        -- final partial batch, which must still be processed
        EXIT WHEN v_employees.COUNT = 0;

        -- Process batch
        FOR i IN 1..v_employees.COUNT LOOP
            DBMS_OUTPUT.PUT_LINE('Processing: ' || v_employees(i).name);
        END LOOP;
        -- No COMMIT here: this loop performs no DML, and committing while
        -- the cursor is open risks ORA-01555 for no benefit.
    END LOOP;

    CLOSE emp_cursor;
END;
/

Exception Handling Optimization

-- Bad: Generic exception handling
-- Pedagogical anti-pattern: WHEN OTHERS with no RAISE swallows every
-- error — including programming bugs — and discards SQLCODE/SQLERRM,
-- leaving no diagnostic trail at all.
CREATE OR REPLACE PROCEDURE generic_exception AS
BEGIN
    -- Some operations (placeholder body)
    NULL;
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error occurred');
END;
/

-- Good: Specific exception handling
-- Handles the expected failure modes of SELECT INTO by name, and re-raises
-- anything unexpected instead of silently swallowing it.
CREATE OR REPLACE PROCEDURE specific_exception AS
    v_employee_id employees.employee_id%TYPE := 999;
    v_employee employees%ROWTYPE;
BEGIN
    -- Specific operation: SELECT INTO raises NO_DATA_FOUND / TOO_MANY_ROWS
    SELECT * INTO v_employee 
    FROM employees 
    WHERE employee_id = v_employee_id;

EXCEPTION
    WHEN NO_DATA_FOUND THEN
        DBMS_OUTPUT.PUT_LINE('Employee not found: ' || v_employee_id);
    WHEN TOO_MANY_ROWS THEN
        DBMS_OUTPUT.PUT_LINE('Multiple employees found: ' || v_employee_id);
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Unexpected error: ' || SQLERRM);
        -- Log error for debugging.
        -- NOTE(review): in production call an AUTONOMOUS_TRANSACTION
        -- logging procedure so the log row survives the caller's rollback.
        INSERT INTO error_log (error_time, error_message, error_code)
        VALUES (SYSDATE, SQLERRM, SQLCODE);
        -- Re-raise so the caller sees the failure; a handler that logs
        -- and returns normally silently hides the error.
        RAISE;
END;
/

🔍 Performance Analysis Tools

Execution Plan Analysis

-- Generate execution plan (writes rows into PLAN_TABLE; runs nothing)
EXPLAIN PLAN FOR
SELECT e.name, d.department_name, l.city
FROM employees e
JOIN departments d ON e.department_id = d.department_id
JOIN locations l ON d.location_id = l.location_id
WHERE e.salary > 50000;

-- View execution plan
SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY);

-- Display with advanced options.
-- DBMS_XPLAN.DISPLAY takes (table_name, statement_id, format, filter_preds);
-- there is no "projection" parameter — projection info is included by
-- format => 'ALL'.
SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY(format => 'ALL'));

-- Actual plan of a cursor still in the shared pool (what really executed,
-- as opposed to the EXPLAIN PLAN estimate)
SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY_CURSOR(
    sql_id => 'your_sql_id',
    format => 'ALL'
));

SQL Trace and TKPROF

-- Enable SQL Trace for the current session
ALTER SESSION SET sql_trace = TRUE;

-- Run your query
SELECT * FROM employees WHERE salary > 50000;

-- Disable SQL Trace
ALTER SESSION SET sql_trace = FALSE;

-- Find this session's trace file (11g+). The old user_dump_dest parameter
-- is deprecated in favor of diagnostic_dest; V$DIAG_INFO gives the exact
-- file path directly.
SELECT value 
FROM v$diag_info 
WHERE name = 'Default Trace File';

-- Use TKPROF to analyze the trace file, e.g.:
-- tkprof trace_file.trc output_file.prf sys=no waits=yes explain=user/password

AWR and ASH Reports

-- Generate AWR report for the two most recent snapshots.
-- Snapshot IDs come from DBA_HIST_SNAPSHOT (V$SESSION has no snap_id
-- column), and the begin snapshot (l_bid) must be the OLDER of the two.
SELECT * FROM TABLE(
    DBMS_WORKLOAD_REPOSITORY.AWR_REPORT_HTML(
        l_dbid     => (SELECT dbid FROM v$database),
        l_inst_num => (SELECT instance_number FROM v$instance),
        l_bid      => (SELECT MAX(snap_id) - 1 FROM dba_hist_snapshot),
        l_eid      => (SELECT MAX(snap_id) FROM dba_hist_snapshot),
        l_options  => 0
    )
);

-- Generate the AWR SQL report for one specific statement over the same
-- snapshot window (AWR_SQL_REPORT_HTML is the documented per-SQL report)
SELECT * FROM TABLE(
    DBMS_WORKLOAD_REPOSITORY.AWR_SQL_REPORT_HTML(
        l_dbid     => (SELECT dbid FROM v$database),
        l_inst_num => (SELECT instance_number FROM v$instance),
        l_bid      => (SELECT MAX(snap_id) - 1 FROM dba_hist_snapshot),
        l_eid      => (SELECT MAX(snap_id) FROM dba_hist_snapshot),
        l_sqlid    => 'your_sql_id'
    )
);

🛠️ Database Optimization

Table Design Optimization

-- Use appropriate data types
CREATE TABLE optimized_employees (
    employee_id NUMBER(6) GENERATED ALWAYS AS IDENTITY,
    name VARCHAR2(100) NOT NULL,
    email VARCHAR2(255) UNIQUE,   -- UNIQUE already creates a unique index
    salary NUMBER(10,2) CHECK (salary > 0),
    hire_date DATE DEFAULT SYSDATE,
    department_id NUMBER(4),
    status VARCHAR2(20) DEFAULT 'ACTIVE',
    -- SYSTIMESTAMP, not SYSDATE: SYSDATE returns a DATE and would silently
    -- truncate the fractional seconds of these TIMESTAMP columns
    created_at TIMESTAMP DEFAULT SYSTIMESTAMP,
    updated_at TIMESTAMP DEFAULT SYSTIMESTAMP
);

-- Add constraints for data integrity. One constraint per statement —
-- Oracle does not accept a comma-separated "ADD CONSTRAINT ..., ADD
-- CONSTRAINT ..." list. Constraint names are schema-unique, so they must
-- not collide with those on the original employees table.
ALTER TABLE optimized_employees
    ADD CONSTRAINT pk_opt_employees PRIMARY KEY (employee_id);
ALTER TABLE optimized_employees
    ADD CONSTRAINT fk_opt_employees_dept FOREIGN KEY (department_id)
        REFERENCES departments(department_id);
ALTER TABLE optimized_employees
    ADD CONSTRAINT chk_opt_employees_status
        CHECK (status IN ('ACTIVE', 'INACTIVE', 'TERMINATED'));

-- Add indexes for performance. Index names are schema-unique, so these must
-- not reuse idx_employees_dept_status from earlier in this guide. No index
-- on email is needed: the UNIQUE constraint already created one, and a
-- second index on the same column list fails with ORA-01408.
CREATE INDEX idx_opt_emp_dept_status ON optimized_employees(department_id, status);
CREATE INDEX idx_opt_emp_hire_date ON optimized_employees(hire_date);

Partitioning for Large Tables

-- Range partitioning by date
-- Queries that filter on sale_date can be pruned to only the matching
-- partition(s), and old partitions can be dropped/archived as a unit.
CREATE TABLE sales (
    sale_id NUMBER,
    sale_date DATE,
    customer_id NUMBER,
    amount NUMBER(10,2),
    region VARCHAR2(50)
) PARTITION BY RANGE (sale_date) (
    -- Upper bounds are exclusive (VALUES LESS THAN)
    PARTITION sales_2022 VALUES LESS THAN (TO_DATE('2023-01-01', 'YYYY-MM-DD')),
    PARTITION sales_2023 VALUES LESS THAN (TO_DATE('2024-01-01', 'YYYY-MM-DD')),
    PARTITION sales_2024 VALUES LESS THAN (TO_DATE('2025-01-01', 'YYYY-MM-DD')),
    -- Catch-all so inserts past the last boundary never fail with ORA-14400
    PARTITION sales_future VALUES LESS THAN (MAXVALUE)
);

-- List partitioning by region
-- Use when the partitioning column has a small, known set of discrete values.
CREATE TABLE regional_sales (
    sale_id NUMBER,
    sale_date DATE,
    customer_id NUMBER,
    amount NUMBER(10,2),
    region VARCHAR2(50)
) PARTITION BY LIST (region) (
    PARTITION sales_north VALUES ('North', 'Northeast', 'Northwest'),
    PARTITION sales_south VALUES ('South', 'Southeast', 'Southwest'),
    PARTITION sales_east VALUES ('East'),
    PARTITION sales_west VALUES ('West'),
    -- DEFAULT partition catches any region value not listed above
    PARTITION sales_other VALUES (DEFAULT)
);

Materialized Views

-- Create materialized view for frequently accessed data
-- BUILD IMMEDIATE populates it at creation time; REFRESH COMPLETE ON DEMAND
-- means it is only rebuilt (in full) when DBMS_MVIEW.REFRESH is called.
CREATE MATERIALIZED VIEW mv_employee_summary
BUILD IMMEDIATE
REFRESH COMPLETE ON DEMAND
AS
SELECT 
    d.department_name,
    COUNT(*) as employee_count,
    AVG(e.salary) as avg_salary,
    MAX(e.salary) as max_salary,
    MIN(e.salary) as min_salary
FROM employees e
JOIN departments d ON e.department_id = d.department_id
GROUP BY d.department_name;

-- Refresh materialized view (a complete rebuild, per REFRESH COMPLETE above)
BEGIN
    DBMS_MVIEW.REFRESH('MV_EMPLOYEE_SUMMARY');
END;
/

-- Create a materialized view log to enable FAST (incremental) refresh.
-- A MV log takes its tracked column list directly — it has no AS SELECT
-- subquery — and INCLUDING NEW VALUES is required for fast refresh of
-- aggregate materialized views.
CREATE MATERIALIZED VIEW LOG ON employees
WITH PRIMARY KEY, ROWID (department_id, salary)
INCLUDING NEW VALUES;

📈 Memory and I/O Optimization

SGA and PGA Tuning

-- Check memory usage. V$PARAMETER has no "unit" column; display_value
-- gives the human-readable setting (e.g. '4G' instead of bytes).
SELECT name, value, display_value
FROM v$parameter
WHERE name IN ('sga_target', 'pga_aggregate_target', 'memory_target');

-- Check buffer cache hit ratio (ANSI join form, consistent with the
-- guidance earlier in this guide)
SELECT 1 - (phy.value / (cur.value + con.value)) "Buffer Cache Hit Ratio"
FROM v$sysstat cur
JOIN v$sysstat con ON con.name = 'consistent gets'
JOIN v$sysstat phy ON phy.name = 'physical reads'
WHERE cur.name = 'db block gets';

-- Check library cache hit ratio (reload ratio should stay near zero)
SELECT sum(pins) "Total Pins",
       sum(reloads) "Total Reloads",
       sum(reloads)/sum(pins) "Reload Ratio"
FROM v$librarycache;

I/O Optimization

-- Check tablespace I/O. V$DATAFILE's path column is "name" (file_name
-- exists only in DBA_DATA_FILES); ANSI joins per the guide's own advice.
SELECT ts.name AS tablespace_name,
       df.name AS file_name,
       fs.phyrds, fs.phywrts, fs.readtim, fs.writetim
FROM v$tablespace ts
JOIN v$datafile df ON df.ts# = ts.ts#
JOIN v$filestat fs ON fs.file# = df.file#;

-- Check segment statistics (largest segments first)
SELECT segment_name, segment_type, tablespace_name, blocks, extents
FROM user_segments
ORDER BY blocks DESC;

-- Check SQL with high I/O (executions > 0 guards the division)
SELECT sql_id, executions, disk_reads, buffer_gets,
       disk_reads / executions AS reads_per_exec
FROM v$sqlarea
WHERE executions > 0
ORDER BY disk_reads DESC;

🎯 Performance Monitoring

Real-Time Monitoring

-- Monitor long-running operations.
-- V$SESSION_LONGOPS has no sql_text column: the operation description is
-- in "message" and the statement is identified by sql_id. The sofar <
-- totalwork filter keeps only operations still in progress.
SELECT sid, serial#, username, sql_id, elapsed_seconds, message
FROM v$session_longops
WHERE elapsed_seconds > 60
  AND sofar < totalwork;

-- Monitor blocking sessions.
-- There is no V$BLOCKING_SESSION view: each blocked session exposes its
-- blocker via V$SESSION.BLOCKING_SESSION / BLOCKING_INSTANCE.
SELECT
    s.sid                AS blocked_sid,
    s.serial#            AS blocked_serial#,
    s.username           AS blocked_username,
    s.sql_id             AS blocked_sql_id,
    s.blocking_session,
    s.blocking_instance
FROM v$session s
WHERE s.blocking_session IS NOT NULL;

-- Monitor wait events (highest cumulative wait time first)
SELECT event, total_waits, time_waited, average_wait
FROM v$system_event
WHERE total_waits > 0
ORDER BY time_waited DESC;

Performance Metrics

-- Database performance metrics.
-- V$SYSMETRIC's columns are metric_unit and begin_time/end_time; it has
-- no "unit" or "timestamp" column.
SELECT 
    metric_name,
    value,
    metric_unit,
    end_time
FROM v$sysmetric
WHERE metric_name IN ('Database CPU Time Ratio', 'Database Wait Time Ratio')
ORDER BY end_time DESC;

-- Top SQL by execution time (elapsed_time and cpu_time are in microseconds;
-- executions > 0 guards the division)
SELECT 
    sql_id,
    executions,
    elapsed_time,
    elapsed_time / executions AS avg_elapsed_time,
    cpu_time,
    cpu_time / executions AS avg_cpu_time
FROM v$sql
WHERE executions > 0
ORDER BY elapsed_time DESC;

🔗 Language-Specific Performance