Oracle学习笔记(十)分区索引失效的思考

此处只说索引失效的场景(只会影响全局索引):
结论:全局索引truncate分区和交换分区都会导致索引失效。
局部索引truncate分区不会导致索引失效。
-- Experiment: effect of TRUNCATE PARTITION on local vs. global indexes.
-- Setup: range-partitioned table with one LOCAL index (col2) and one
-- global index (col3).
drop table part_tab_trunc purge;
create table part_tab_trunc (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (maxvalue)
        )
        ;
-- 50,000 rows: ids 1..50000 spread over partitions p1..p3
insert into part_tab_trunc select rownum ,rownum+1,rownum+2, rpad('*',400,'*') from dual connect by rownum <=50000;
commit;
create  index idx_part_trunc_col2  on part_tab_trunc(col2) local;
create  index idx_part_trunc_col3  on part_tab_trunc(col3) ;


--- before TRUNCATE PARTITION: check each local index partition's status
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_TRUNC_COL2';
 
 
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_TRUNC_COL2            P1                             USABLE
IDX_PART_TRUNC_COL2            P2                             USABLE
IDX_PART_TRUNC_COL2            P3                             USABLE

-- Status of the global index before the truncate.
SELECT index_name, status
FROM user_indexes
WHERE index_name = 'IDX_PART_TRUNC_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_TRUNC_COL3            VALID

-- Truncate one partition: the local index is untouched,
-- but (as shown below) the global index becomes UNUSABLE.
alter table part_tab_trunc truncate partition p1 ;

--- after TRUNCATE PARTITION: local index partitions are still USABLE
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_TRUNC_COL2';
 
 
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_TRUNC_COL2            P1                             USABLE
IDX_PART_TRUNC_COL2            P2                             USABLE
IDX_PART_TRUNC_COL2            P3                             USABLE


-- The global index status after the truncate (UNUSABLE in the run below).
SELECT index_name, status
FROM user_indexes
WHERE index_name = 'IDX_PART_TRUNC_COL3';


INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_TRUNC_COL3            UNUSABLE

  

此处只说索引失效的场景(也是只影响全局索引):
-- Experiment 1: DROP PARTITION without the UPDATE GLOBAL INDEXES clause.

drop table part_tab_drop purge;
create table part_tab_drop (id int,col2 int ,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (maxvalue)
        )
        ;
insert into part_tab_drop select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=50000;
commit;

-- one local index (col2) and one global index (col3)
create  index idx_part_drop_col2 on part_tab_drop(col2) local;
create  index idx_part_drop_col3 on part_tab_drop(col3) ;

-- before dropping the partition
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             VALID

-- Drop without UPDATE GLOBAL INDEXES: invalidates the global index.
alter table part_tab_drop drop partition p1 ;

-- after dropping the partition
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             UNUSABLE

-- Experiment 2: DROP PARTITION with the UPDATE GLOBAL INDEXES clause.
drop table part_tab_drop purge;
create table part_tab_drop (id int,col2 int ,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (maxvalue)
        )
        ;
insert into part_tab_drop select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=50000;
commit;

create  index idx_part_drop_col2 on part_tab_drop(col2) local;
create  index idx_part_drop_col3 on part_tab_drop(col3) ;

-- before dropping the partition
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             VALID

-- With UPDATE GLOBAL INDEXES the global index is maintained during the DDL
-- (slower drop, but the index stays VALID).
alter table part_tab_drop drop partition p1 Update GLOBAL  indexes;

-- after dropping the partition
select index_name,status from user_indexes where index_name='IDX_PART_DROP_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_DROP_COL3             VALID

  

-- Scenario: SPLIT PARTITION and index invalidation.
-- If the MAX partition already contains rows, splitting it leaves the local
-- index partitions of the newly carved, non-empty partitions UNUSABLE.
drop table part_tab_split purge;
create table part_tab_split (id int,col2 int ,col3 int ,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p_max values less than (maxvalue)
        )
        ;
-- 90,000 rows: every id above 20000 (70,000 rows) lands in p_max
insert into part_tab_split select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=90000;
commit;
create index idx_part_split_col2 on part_tab_split (col2) local;
create index idx_part_split_col3 on part_tab_split (col3) ;

--- before the split: all local index partitions are USABLE
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_SPLIT_COL2';

INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ -------
IDX_PART_SPLIT_COL2            P1                             USABLE
IDX_PART_SPLIT_COL2            P2                             USABLE
IDX_PART_SPLIT_COL2            P_MAX                          USABLE
 
-- Status of the global index before the split.
SELECT index_name, status
FROM user_indexes
WHERE index_name = 'IDX_PART_SPLIT_COL3';
 
INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_SPLIT_COL3            VALID
 
-- Repeatedly split P_MAX; every partition carved out of P_MAX still holds
-- rows, so its local index partition comes out UNUSABLE and the global
-- index is invalidated as well.
alter table part_tab_split SPLIT PARTITION   P_MAX  at (30000) into (PARTITION p3,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (40000) into (PARTITION p4,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (50000) into (PARTITION p5,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (60000) into (PARTITION p6,PARTITION P_MAX);
alter table part_tab_split SPLIT PARTITION   P_MAX  at (70000) into (PARTITION p7,PARTITION P_MAX);
 
--- after the splits: check local index partition status

select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_SPLIT_COL2';

INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_SPLIT_COL2            P1                             USABLE
IDX_PART_SPLIT_COL2            P2                             USABLE
IDX_PART_SPLIT_COL2            P3                             UNUSABLE
IDX_PART_SPLIT_COL2            P4                             UNUSABLE
IDX_PART_SPLIT_COL2            P5                             UNUSABLE
IDX_PART_SPLIT_COL2            P6                             UNUSABLE
IDX_PART_SPLIT_COL2            P7                             UNUSABLE
IDX_PART_SPLIT_COL2            P_MAX                          UNUSABLE
 
-- The global index status after the splits (UNUSABLE in the run below).
SELECT index_name, status
FROM user_indexes
WHERE index_name = 'IDX_PART_SPLIT_COL3';
 
 
INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_SPLIT_COL3            UNUSABLE
 
--结论是:split会导致全局索引失效,也会导致局部索引失效。(假如P7分区没数据则局部索引不会失效)
 
-- Rebuild the unusable indexes.
-- Local index partitions: generate one "alter index ... rebuild partition"
-- statement per UNUSABLE partition (the Chinese alias 重建列 means
-- "rebuild statement column").
select b.table_name,
       a.INDEX_NAME,
       a.PARTITION_NAME,
       a.STATUS,
       'alter index ' || a.index_name || ' rebuild partition ' ||partition_name || ';' 重建列
  from USER_IND_PARTITIONS a, user_part_indexes b
 where a.index_name = b.index_name
   and b.TABLE_NAME IN ('PART_TAB_SPLIT')
   and  STATUS = 'UNUSABLE'
 ORDER BY b.table_name, a.INDEX_NAME, a.PARTITION_NAME;

-- Global index: a plain rebuild is enough.
 alter index idx_part_split_col3 rebuild;

  

add不会导致全局和局部索引失效!
-- Experiment: ADD PARTITION does not invalidate local or global indexes.
drop table part_tab_add purge;
create table part_tab_add (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (30000),
        partition p4 values less than (40000),
        partition p5 values less than (50000)
        )
        ;
insert into part_tab_add select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=45000;
commit;

create index idx_part_add_col2 on part_tab_add (col2) local;
create index idx_part_add_col3 on part_tab_add (col3) ;

--- before adding partitions
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_ADD_COL2';
 
 INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ ------
IDX_PART_ADD_COL2              P1                             USABLE
IDX_PART_ADD_COL2              P2                             USABLE
IDX_PART_ADD_COL2              P3                             USABLE
IDX_PART_ADD_COL2              P4                             USABLE
IDX_PART_ADD_COL2              P5                             USABLE

-- Status of the global index before adding partitions.
SELECT index_name, status
FROM user_indexes
WHERE index_name = 'IDX_PART_ADD_COL3';
 
INDEX_NAME                     STATUS
------------------------------ -------
IDX_PART_ADD_COL3              VALID

 
-- ADD PARTITION touches no existing data, so no index is invalidated.
alter table part_tab_add  add PARTITION p6 values less than (60000);
alter table part_tab_add  add PARTITION p_max  values less than (maxvalue);

--- after adding the partitions (the original note said "before" by mistake)
select index_name, partition_name, status
  from user_ind_partitions
 where index_name = 'IDX_PART_ADD_COL2';

INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ -------
IDX_PART_ADD_COL2              P1                             USABLE
IDX_PART_ADD_COL2              P2                             USABLE
IDX_PART_ADD_COL2              P3                             USABLE
IDX_PART_ADD_COL2              P4                             USABLE
IDX_PART_ADD_COL2              P5                             USABLE
IDX_PART_ADD_COL2              P6                             USABLE
IDX_PART_ADD_COL2              P_MAX                          USABLE
 
-- The global index status after adding partitions (still VALID).
SELECT index_name, status
FROM user_indexes
WHERE index_name = 'IDX_PART_ADD_COL3';

INDEX_NAME                     STATUS
------------------------------ ------
IDX_PART_ADD_COL3              VALID

  交换索引:

/*
此处只说索引失效的场景(只会影响全局索引):
结论:全局索引truncate分区和交换分区都会导致索引失效。
局部索引truncate分区不会导致索引失效。但是在交换分区的时候,如果交换的临时表没有索引,或者
虽然有索引但没有使用including indexes关键字,会导致局部索引失效,即对应分区的索引分区失效。
重建局部索引只能用alter index local_idx rebuild partition p1这样的方式。
*/

-- Partition EXCHANGE, experiment 1: INCLUDING INDEXES only,
-- no UPDATE GLOBAL INDEXES.
connect ljb/ljb
drop table part_tab_exch purge;
create table part_tab_exch (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
         partition p1 values less than (10000),
         partition p2 values less than (20000),
         partition p3 values less than (30000),
         partition p4 values less than (40000),
         partition p5 values less than (50000),
         partition p_max values less than (maxvalue)
        )
        ;
insert into part_tab_exch select rownum ,rownum+1,rownum+2, rpad('*',400,'*') from dual connect by rownum <=60000;
commit;

create index idx_part_exch_col2 on part_tab_exch(col2) local;
create index idx_part_exch_col3 on part_tab_exch (col3);


-- EXCHANGE swaps data between one partition and an ordinary table;
-- the ordinary table gets a matching index so INCLUDING INDEXES can be used
drop table normal_tab purge;
create table normal_tab (id int,col2 int,col3 int,contents varchar2(4000));
create index idx_norm_col2  on normal_tab (col2);


-- before the exchange
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_EXCH_COL2             P1                             USABLE
IDX_PART_EXCH_COL2             P2                             USABLE
IDX_PART_EXCH_COL2             P3                             USABLE
IDX_PART_EXCH_COL2             P4                             USABLE
IDX_PART_EXCH_COL2             P5                             USABLE
IDX_PART_EXCH_COL2             P_MAX                          USABLE

-- global index status before the exchange
select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';
INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_EXCH_COL3             VALID

-- Exchange p1 with the normal table. INCLUDING INDEXES keeps the local
-- index partition usable; without UPDATE GLOBAL INDEXES the global index
-- becomes UNUSABLE (shown below).
alter table part_tab_exch exchange partition p1 with table normal_tab including indexes;


-- after the exchange: local index partitions remain USABLE
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
INDEX_NAME                     PARTITION_NAME                 STATUS
------------------------------ ------------------------------ --------
IDX_PART_EXCH_COL2             P1                             USABLE
IDX_PART_EXCH_COL2             P2                             USABLE
IDX_PART_EXCH_COL2             P3                             USABLE
IDX_PART_EXCH_COL2             P4                             USABLE
IDX_PART_EXCH_COL2             P5                             USABLE
IDX_PART_EXCH_COL2             P_MAX                          USABLE

-- the global index, however, is now UNUSABLE
select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';

INDEX_NAME                     STATUS
------------------------------ --------
IDX_PART_EXCH_COL3             UNUSABLE

---注意,如果加上update global indexes 关键字,这个IDX_PART_EXCH_COL3就不会失效

-- Experiment 2: EXCHANGE PARTITION with the UPDATE GLOBAL INDEXES clause.

-- same setup as experiment 1
connect ljb/ljb
drop table part_tab_exch purge;
create table part_tab_exch (id int,col2 int,col3 int,contents varchar2(4000))
        partition by range (id)
        (
         partition p1 values less than (10000),
         partition p2 values less than (20000),
         partition p3 values less than (30000),
         partition p4 values less than (40000),
         partition p5 values less than (50000),
         partition p_max values less than (maxvalue)
        )
        ;
insert into part_tab_exch select rownum ,rownum+1,rownum+2, rpad('*',400,'*') from dual connect by rownum <=60000;
commit;

create index idx_part_exch_col2 on part_tab_exch(col2) local;
create index idx_part_exch_col3 on part_tab_exch (col3);


-- ordinary table to exchange with
drop table normal_tab purge;
create table normal_tab (id int,col2 int,col3 int,contents varchar2(4000));
create index idx_norm_col2  on normal_tab (col2);


-- before the exchange
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';

-- UPDATE GLOBAL INDEXES keeps the global index VALID during the exchange
alter table part_tab_exch exchange partition p1 with table normal_tab including indexes Update GLOBAL  indexes;

-- after the exchange
select index_name,partition_name,status from  user_ind_partitions where index_name='IDX_PART_EXCH_COL2';
select index_name,status from user_indexes where index_name='IDX_PART_EXCH_COL3';

  失去分区表的意义:

-- Range partitioning example: a mis-planned catch-all MAXVALUE partition
-- defeats the purpose of partitioning.
drop table range_part_tab purge;
-- note: this is a RANGE partitioned table (one partition per month of 2013)

-- example 1
create table range_part_tab (id number,deal_date date,area_code number,nbr number,contents varchar2(4000))
           partition by range (deal_date)
           (
           partition p_201301 values less than (TO_DATE('2013-02-01', 'YYYY-MM-DD')),
           partition p_201302 values less than (TO_DATE('2013-03-01', 'YYYY-MM-DD')),
           partition p_201303 values less than (TO_DATE('2013-04-01', 'YYYY-MM-DD')),
           partition p_201304 values less than (TO_DATE('2013-05-01', 'YYYY-MM-DD')),
           partition p_201305 values less than (TO_DATE('2013-06-01', 'YYYY-MM-DD')),
           partition p_201306 values less than (TO_DATE('2013-07-01', 'YYYY-MM-DD')),
           partition p_201307 values less than (TO_DATE('2013-08-01', 'YYYY-MM-DD')),
           partition p_max values less than (maxvalue)
           )
           ;


-- 100,000 rows with random dates over the year before sysdate and random
-- Fujian area codes (591-599); the original notes call this the "2013" data
insert into range_part_tab (id,deal_date,area_code,nbr,contents)
      select rownum,
             to_date( to_char(sysdate-365,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
             ceil(dbms_random.value(591,599)),
             ceil(dbms_random.value(18900000001,18999999999)),
             rpad('*',400,'*')
        from dual
      connect by rownum <= 100000;
commit;



-- another 100,000 rows with random dates over the year after sysdate (the
-- "2014" data); these all fall into the default p_max partition
insert into range_part_tab (id,deal_date,area_code,nbr,contents)
      select rownum,
             to_date( to_char(sysdate,'J')+TRUNC(DBMS_RANDOM.VALUE(0,365)),'J'),
             ceil(dbms_random.value(591,599)),
             ceil(dbms_random.value(18900000001,18999999999)),
             rpad('*',400,'*')
        from dual
      connect by rownum <= 100000;
commit;

--经常有类似如下的案例:由于规划失误,数据都进默认分区,导致默认分区超大,失去分区表的意义。

SQL> select count(*) from range_part_tab partition (p_201301);
  COUNT(*)
----------
     16858
SQL> select count(*) from range_part_tab partition (p_201302);
  COUNT(*)
----------
      7664
SQL> select count(*) from range_part_tab partition (p_201303);
  COUNT(*)
----------
      8484
SQL> select count(*) from range_part_tab partition (p_201304);
  COUNT(*)
----------
      8177
SQL> select count(*) from range_part_tab partition (p_201305);
  COUNT(*)
----------
      8414
SQL> select count(*) from range_part_tab partition (p_201306);
  COUNT(*)
----------
      8245
SQL> select count(*) from range_part_tab partition (p_201307);
  COUNT(*)
----------
      8565
SQL> select count(*) from range_part_tab partition (p_max);
  COUNT(*)
----------
    133593

  分区表比普通表性能差:

-- Build a partitioned table and load it.
drop table part_tab purge;
create table part_tab (id int,col2 int,col3 int)
        partition by range (id)
        (
        partition p1 values less than (10000),
        partition p2 values less than (20000),
        partition p3 values less than (30000),
        partition p4 values less than (40000),
        partition p5 values less than (50000),
        partition p6 values less than (60000),
        partition p7 values less than (70000),
        partition p8 values less than (80000),
        partition p9 values less than (90000),
        partition p10 values less than (100000),
        partition p11 values less than (maxvalue)
        )
        ;
insert into part_tab select rownum,rownum+1,rownum+2 from dual connect by rownum <=110000;
commit;
create  index idx_par_tab_col2 on part_tab(col2) local;
create  index idx_par_tab_col3 on part_tab(col3) ;


-- Build an ordinary table with the same structure and row count.

drop table norm_tab purge;
create table norm_tab  (id int,col2 int,col3 int);
insert into norm_tab select rownum,rownum+1,rownum+2 from dual connect by rownum <=110000;
commit;
create  index idx_nor_tab_col2 on norm_tab(col2) ;
create  index idx_nor_tab_col3 on norm_tab(col3) ;

set autotrace traceonly
set linesize 1000
set timing on 

-- Test group 1
-- First the partitioned table: no partition-key predicate, so the plan
-- below probes the col2 local index in every partition (Pstart=1, Pstop=11).
select * from part_tab where col2=8 ;
执行计划
-----------------------------------------------------------------------------------------------------------------------
| Id  | Operation                          | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
-----------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                   |                  |     1 |    39 |    13   (0)| 00:00:01 |       |       |
|   1 |  PARTITION RANGE ALL               |                  |     1 |    39 |    13   (0)| 00:00:01 |     1 |    11 |
|   2 |   TABLE ACCESS BY LOCAL INDEX ROWID| PART_TAB         |     1 |    39 |    13   (0)| 00:00:01 |     1 |    11 |
|*  3 |    INDEX RANGE SCAN                | IDX_PAR_TAB_COL2 |     1 |       |    12   (0)| 00:00:01 |     1 |    11 |
-----------------------------------------------------------------------------------------------------------------------
  3 - access("COL2"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
         24  consistent gets
          0  physical reads
          0  redo size
        539  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed

-- Now the ordinary table: a single B-tree probe — 4 consistent gets in the
-- output below vs. 24 for the partitioned table above.
select * from norm_tab where col2=8 ;
执行计划
------------------------------------------------------------------------------------------------
| Id  | Operation                   | Name             | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |                  |     1 |    39 |     2   (0)| 00:00:01 |
|   1 |  TABLE ACCESS BY INDEX ROWID| NORM_TAB         |     1 |    39 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_NOR_TAB_COL2 |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------
 2 - access("COL2"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          4  consistent gets
          0  physical reads
          0  redo size
        543  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed



-- Compare index height (blevel) and size statistics.
set autotrace off
select index_name,
          blevel,
          leaf_blocks,
          num_rows,
          distinct_keys,
          clustering_factor
     from user_ind_statistics
    where table_name in( 'NORM_TAB');
    
-- per-partition statistics for the partitioned table's local indexes
select index_name,
          blevel,
          leaf_blocks,
          num_rows,
          distinct_keys,
          clustering_factor FROM USER_IND_PARTITIONS where index_name like 'IDX_PAR_TAB%';


-- Two more test groups:
set autotrace traceonly

-- Test group 2: the partition-key predicate (id=2) prunes the scan to a
-- single partition (PARTITION RANGE SINGLE in the plan below).
select * from part_tab where col2=8 and id=2;
执行计划
----------------------------------------------------------------------------------------------------------------------
| Id  | Operation                          | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
-----------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                   |                  |     1 |    39 |     2   (0)| 00:00:01 |       |       |
|   1 |  PARTITION RANGE SINGLE            |                  |     1 |    39 |     2   (0)| 00:00:01 |     1 |     1 |
|*  2 |   TABLE ACCESS BY LOCAL INDEX ROWID| PART_TAB         |     1 |    39 |     2   (0)| 00:00:01 |     1 |     1 |
|*  3 |    INDEX RANGE SCAN                | IDX_PAR_TAB_COL2 |     1 |       |     1   (0)| 00:00:01 |     1 |     1 |
-----------------------------------------------------------------------------------------------------------------------
  2 - filter("ID"=2)
  3 - access("COL2"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          3  consistent gets
          0  physical reads
          0  redo size
        399  bytes sent via SQL*Net to client
        404  bytes received via SQL*Net from client
          1  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          0  rows processed
          
-- Ordinary table with the same two predicates: no pruning possible
-- (this run shows 329 consistent gets below).
select * from norm_tab where col2=8 and id=2;

执行计划
----------------------------------------------------------
Plan hash value: 3649198428

------------------------------------------------------------------------------------------------
| Id  | Operation                   | Name             | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |                  |    10 |   390 |     2   (0)| 00:00:01 |
|*  1 |  TABLE ACCESS BY INDEX ROWID| NORM_TAB         |    10 |   390 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_NOR_TAB_COL2 |     4 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------
  1 - filter("ID"=2)
  2 - access("COL2"=8) 
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
        329  consistent gets
          0  physical reads
          0  redo size
        399  bytes sent via SQL*Net to client
        404  bytes received via SQL*Net from client
          1  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          0  rows processed
          
          
-- Test group 3: col3 is covered by a GLOBAL index, so the partitioned table
-- performs a single index probe, just like the ordinary table.
select * from part_tab where col3=8 ;
执行计划
-----------------------------------------------------------------------------------------------------------------------
| Id  | Operation                          | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
-----------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                   |                  |     1 |    15 |     2   (0)| 00:00:01 |       |       |
|   1 |  TABLE ACCESS BY GLOBAL INDEX ROWID| PART_TAB         |     1 |    15 |     2   (0)| 00:00:01 | ROWID | ROWID |
|*  2 |   INDEX RANGE SCAN                 | IDX_PAR_TAB_COL3 |     1 |       |     1   (0)| 00:00:01 |       |       |
-----------------------------------------------------------------------------------------------------------------------
   2 - access("COL3"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          4  consistent gets
          0  physical reads
          0  redo size
        543  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed

-- same query against the ordinary table for comparison (identical cost)
select * from norm_tab where col3=8 ;
执行计划
------------------------------------------------------------------------------------------------
| Id  | Operation                   | Name             | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |                  |     1 |    15 |     2   (0)| 00:00:01 |
|   1 |  TABLE ACCESS BY INDEX ROWID| NORM_TAB         |     1 |    15 |     2   (0)| 00:00:01 |
|*  2 |   INDEX RANGE SCAN          | IDX_NOR_TAB_COL3 |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------
   2 - access("COL3"=8)
统计信息
----------------------------------------------------------
          0  recursive calls
          0  db block gets
          4  consistent gets
          0  physical reads
          0  redo size
        543  bytes sent via SQL*Net to client
        415  bytes received via SQL*Net from client
          2  SQL*Net roundtrips to/from client
          0  sorts (memory)
          0  sorts (disk)
          1  rows processed

  统计信息引发的性能:

-- Experiment 1: join with NO statistics gathered on the global temporary table.

DROP TABLE t1 CASCADE CONSTRAINTS PURGE; 
DROP TABLE t2 CASCADE CONSTRAINTS PURGE; 
CREATE TABLE t1 (
     id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   )
   ; 
   -- session-scoped global temporary table (ON COMMIT PRESERVE ROWS)
CREATE global temporary table t2 (
     id NUMBER NOT NULL,
     t1_id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   ) on commit preserve rows 
   ;   
execute dbms_random.seed(0); 
-- t1: 10 rows; t2: 100,000 rows
INSERT INTO t1
     SELECT  rownum,  rownum, dbms_random.string('a', 50)
       FROM dual
     CONNECT BY level <= 10
      ORDER BY dbms_random.random; 
INSERT INTO t2 SELECT rownum, rownum, rownum, dbms_random.string('b', 50) FROM dual CONNECT BY level <= 100000
    ORDER BY dbms_random.random; 
COMMIT; 
select count(*) from t1;
select count(*) from t2;

set linesize 1000 
alter session set statistics_level=all ;
SELECT *
FROM t1, t2
WHERE t1.id = t2.t1_id;
select * from table(dbms_xplan.display_cursor(null,null,'allstats last'));
----------------------------------------------------------------------------------------------------------------
| Id  | Operation          | Name | Starts | E-Rows | A-Rows |   A-Time   | Buffers |  OMem |  1Mem | Used-Mem |
----------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT   |      |      1 |        |     10 |00:00:00.05 |     991 |       |       |          |
|*  1 |  HASH JOIN         |      |      1 |     10 |     10 |00:00:00.05 |     991 |   742K|   742K| 1092K (0)|
|   2 |   TABLE ACCESS FULL| T1   |      1 |     10 |     10 |00:00:00.01 |       7 |       |       |          |
|   3 |   TABLE ACCESS FULL| T2   |      1 |  90564 |    100K|00:00:00.02 |     984 |       |       |          |
----------------------------------------------------------------------------------------------------------------
-- Experiment 2: statistics HAVE been gathered on the global temporary table.


DROP TABLE t1 CASCADE CONSTRAINTS PURGE; 
DROP TABLE t2 CASCADE CONSTRAINTS PURGE; 
CREATE TABLE t1 (
     id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   )
   ; 
-- session-scoped global temporary table, same shape as experiment 1
CREATE global temporary table t2 (
     id NUMBER NOT NULL,
     t1_id NUMBER NOT NULL,
     n NUMBER,
     contents VARCHAR2(4000)
   ) on commit preserve rows 
   ;   
execute dbms_random.seed(0); 
INSERT INTO t1
     SELECT  rownum,  rownum, dbms_random.string('a', 50)
       FROM dual
     CONNECT BY level <= 10
      ORDER BY dbms_random.random; 
INSERT INTO t2 SELECT rownum, rownum, rownum, dbms_random.string('b', 50) FROM dual CONNECT BY level <= 100000
    ORDER BY dbms_random.random; 
COMMIT; 

--这个时候,如果系统曾经对全局临时表收集过统计信息,这个肯定不可能正好落在这个SESSION收集统计信息,
--所以我们做试验的时候,退出当前SESSION,开新的SESSION执行如下
-- gather stats on T2 from the NEW session (10% sample, indexed columns)
exec dbms_stats.gather_table_stats(ownname => 'LJB',tabname => 'T2',estimate_percent => 10,method_opt=> 'for all indexed columns',cascade=>TRUE) ;  

-- confirm what was gathered
select table_name,
       partition_name,
       last_analyzed,
       partition_position,      
       num_rows
  from user_tab_statistics t
 where table_name ='T2';
 
 
 -- then go back to the original session and continue
select count(*) from t1;
select count(*) from t2;


set linesize 1000 
alter session set statistics_level=all ;
SELECT *
FROM t1, t2
WHERE t1.id = t2.t1_id;
select * from table(dbms_xplan.display_cursor(null,null,'allstats last'));

----------------------------------------------------------------------------------------------------------------
| Id  | Operation          | Name | Starts | E-Rows | A-Rows |   A-Time   | Buffers |  OMem |  1Mem | Used-Mem |
----------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT   |      |      1 |        |     10 |00:00:00.10 |    1970 |       |       |          |
|*  1 |  HASH JOIN         |      |      1 |      1 |     10 |00:00:00.10 |    1970 |  9472K|  1956K|    9M (0)|
|   2 |   TABLE ACCESS FULL| T2   |      1 |      1 |    100K|00:00:00.02 |    1962 |       |       |          |
|   3 |   TABLE ACCESS FULL| T1   |      1 |     10 |     10 |00:00:00.01 |       8 |       |       |          |
----------------------------------------------------------------------------------------------------------------

  各接口程序的经典优化:

主要应用了临时表各个SESSION数据独立的特性。
可以考虑用同样一张表名,给不同的应用接口使用。
传统的老方法是有10个接口程序就处理10张不同的中间接口表,在里面增加一个表示类型的字段,来区别,这样对扩展性和性能都有害。


drop table t_global;
-- session-scoped global temporary table with the same shape as dba_objects
create global temporary table t_global on commit preserve rows as select  * from dba_objects where 1=2;
select table_name,temporary,duration from user_tables  where table_name='T_GLOBAL';

-- A session-scoped temp table is enough to demonstrate per-session data
-- isolation; the transaction-scoped variant is not repeated here.
--- connect as session 1 (e.g. the process for business application 1)
 connect ljb/ljb
 
 insert into t_global select * from dba_objects where rownum<=10;
 -- commit: rows survive for this session (ON COMMIT PRESERVE ROWS above)
 commit;
 select count(*) from t_global;


  COUNT(*)
----------
    10
 
--继续登录session 2(比如是业务2的进程)
connect yxl/yxl 

-- session 2 sees only its own rows: temp table data is session-private
insert into t_global select * from dba_objects where rownum<=20;
commit;
select count(*) from t_global;

  COUNT(*)
----------
    20

--业务n的进程......

  字段设计和空间换时间:

思路:

    把用于中间运算的全局临时表增加字段,从而减少了在一些业务场景下的表连接次数,这是一个空间换时间的概念,需要权衡利弊。
    
相关案例概述:

某次生产系统中发现一条语句执行的很慢,经过查看该语句十表关联, 执行计划甚为复杂,类似如下:

    SELECT *
   FROM t1           a,
        t_global_tab b,
        t2           c,
        t3           d,
        t4           e,
        t5           f,
        ...
  WHERE a.id=b.id
     and ...
     and ...


后来经过业务的推敲和确认,发现将这个t_global_tab临时表增加几个字段后,就无需再到那么多表中获取其他相关信息了。
改造后的SQL写法,从原来的10表关联瞬间变成4表关联。性能大大提升了!

  

日志暴增相关的故障:
某次系统出现REDO暴增的情况,经过查询,发现有大量的delete语句在操作,而该语句实质是在做中间运算,将临时数据先存在中间表,处理完毕后删除该表。
后来把这个表改为全局临时表后,REDO暴增的情况得以大大缓和,系统性能同时也得以提升。

--- Test preparation: build a view for watching this session's redo volume.
sqlplus "/ as sysdba"
grant all on v_$mystat to ljb;
grant all on v_$statname to ljb;
connect  ljb/ljb
drop table t purge;
create table t as select * from dba_objects ;
-- The view lets later steps simply run:  select * from v_redo_size
create or replace view v_redo_size as
    select n.name, s.value
    from v$statname n
    inner join v$mystat s
       on s.statistic# = n.statistic#
    where n.name = 'redo size';


-- Approach 1: use a permanent heap table as the scratch area.
drop table t_tmp purge;
create table t_tmp (id int,col2 int ,col3 int,contents varchar2(4000));

select * from v_redo_size;

NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                              9988

begin 
   insert into t_tmp select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=10000; 
   -- After the intermediate rows are used they must be deleted explicitly;
   -- the DELETE on a permanent table generates substantial redo.
   -- (Most of the intermediate processing logic is omitted here.)
   delete from t_tmp ;
   commit;
end; 
/

select * from v_redo_size;

NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                          11385896

总共产生日志量为:11385896-9988=11375908

-- Approach 2: use a global temporary table as the scratch area.
-- Exit the current session and reconnect first, so the v_redo_size counter
-- starts fresh for this session.
drop table t_global purge;
create global temporary table t_global (id int,col2 int ,col3 int,contents varchar2(4000)) on commit delete rows;

select * from v_redo_size;

NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                             42272

begin 
   insert into t_global select rownum ,rownum+1,rownum+2,rpad('*',400,'*') from dual connect by rownum <=10000; 
   -- No explicit DELETE is needed: ON COMMIT DELETE ROWS clears the rows
   -- automatically at commit, saving both the DELETE work and its redo.
   --delete from t_global ;
  commit;
end; 
/

select * from v_redo_size;
NAME                                                                  VALUE
---------------------------------------------------------------- ----------
redo size                                                             209152

总共产生日志量为:209152-42272=166880

对比:方案2的日志量166880仅为方案1的11375908的1.5%左右,差距十分明显。

  监控总结的脚本:

监控表中有没有过时类型的字段:

-- Monitoring: columns that still use legacy data types in the current schema.
-- LONG RAW is as deprecated as LONG, so it is included in the check as well.
select table_name, 
       column_name, 
       data_type
  from user_tab_columns
 where data_type in ( 'LONG', 'LONG RAW', 'CHAR');

------------------------------------------------------------------------------------

-- Demo objects containing the legacy data types the monitor should catch.
drop table t_long purge;
create table t_long (id int, name long);
drop table t_char purge;
create table t_char (id int, address char(10));
-- Column formatting so the result fits on one line.
set linesize 266
col table_name  format a25
col column_name format a25
col data_type   format a20

SQL> select table_name,
  2         column_name,
  3         data_type
  4    from user_tab_columns
  5   where data_type in ( 'LONG','CHAR');

TABLE_NAME                COLUMN_NAME             DATA_TYPE
------------------------- ----------------------------------
T_CHAR                    ADDRESS                   CHAR
T_LONG                    NAME                      LONG

  监控当前有多少带子分区的分区表:

-- Composite-partitioned tables (tables that have subpartitions) owned by the
-- current user.
select table_name, 
       partitioning_type, 
       subpartitioning_type, 
       partition_count
  from user_part_tables
 where subpartitioning_type <> 'NONE';
 
-- Just the count of such tables.
select count(*) from user_part_tables where  subpartitioning_type <> 'NONE';

  监控分区表各分区大小严重不均匀情况:

-- The script below detects partition-size skew; it also helps spot the case
-- where, through neglected partition maintenance, current data keeps landing
-- in the default (MAXVALUE) partition.

-- Statistics are normally gathered automatically; this manual gather is only
-- needed right after creating the table, to make the test meaningful.
exec dbms_stats.gather_table_stats(ownname => 'LJB',tabname => 'RANGE_PART_TAB',estimate_percent => 10,method_opt=> 'for all indexed columns',cascade=>TRUE) ;  

-- Tables where some partition holds more than twice the average partition
-- row count (max/sum > 2/count is equivalent to max > 2*avg).
-- NOTE(review): count(*) also counts partitions whose NUM_ROWS is NULL
-- (never analyzed), which lowers the threshold -- confirm this is intended.
set linesize 266
col table_name format a20
select table_name,
       max(num_rows),
       trunc(avg(num_rows),0),
       sum(num_rows),
       trunc(max(num_rows) / sum(num_rows),2),
       count(*)
  from user_tab_partitions
 group by table_name
having max(num_rows) / sum(num_rows) > 2 / count(*);

--也可用来作为判断当前用户下是否因疏于分区管理、导致大量数据进入默认分区的参考。

-- Per-partition row counts for one table, biggest first -- useful to see
-- whether recent data has piled up in a single (e.g. default) partition.
select table_name,
       partition_name,
       num_rows  
 from user_tab_partitions
 where table_name = 'RANGE_PART_TAB'
 order by num_rows desc;

  监控分区数过多的表:

prompt <p>当前用户下分区最多的前10个对象
-- Top 10 most-partitioned tables of the current user.  The ORDER BY sits
-- inside the inline view so ROWNUM is applied AFTER sorting (the classic
-- pre-12c Top-N pattern).
select *
  from (select table_owner, table_name, count(*) cnt
          from user_tab_partitions
         group by table_owner, table_name
         order by cnt desc)
where rownum <= 10;

prompt <p>当前用户下分区个数超过100个的表
-- Tables of the current user having at least 100 partitions.
-- Fixed: the original grouped by table_name twice and placed HAVING before
-- GROUP BY; standard clause order is used here.
select  table_name, count(*) cnt
  from user_tab_partitions
 group by table_name
having count(*) >= 100
 order by cnt desc;

-- Simpler alternative: USER_PART_TABLES already stores the partition count.
select table_name, partitioning_type, subpartitioning_type
  from user_part_tables
 where partition_count > 100;

  监控哪些全局临时表被收集统计信息:

-- Global temporary tables that have had optimizer statistics gathered
-- (last_analyzed set) -- usually undesirable, since stats on a GTT reflect
-- one session's data.
-- Fixed: USER_TABLES has no OWNER column (that caused ORA-00904); the
-- current user is reported instead.
select user as owner, 
       table_name, 
       t.last_analyzed, 
       t.num_rows, 
       t.blocks
  from user_tables t
where t.temporary = 'Y'
   and last_analyzed is not null;

  监控哪些外键未建索引:

-- Foreign-key constraints whose columns are not covered by any index.
-- Unindexed FKs cause full scans and blocking locks on parent-table DML.
-- The pivot (cname1..cname8) supports composite FKs of up to 8 columns.
-- Fixed: the statement was missing its terminating semicolon.
select table_name,
       constraint_name,
       cname1 || nvl2(cname2, ',' || cname2, null) ||
       nvl2(cname3, ',' || cname3, null) ||
       nvl2(cname4, ',' || cname4, null) ||
       nvl2(cname5, ',' || cname5, null) ||
       nvl2(cname6, ',' || cname6, null) ||
       nvl2(cname7, ',' || cname7, null) ||
       nvl2(cname8, ',' || cname8, null) columns
  from (select b.table_name,
               b.constraint_name,
               max(decode(position, 1, column_name, null)) cname1,
               max(decode(position, 2, column_name, null)) cname2,
               max(decode(position, 3, column_name, null)) cname3,
               max(decode(position, 4, column_name, null)) cname4,
               max(decode(position, 5, column_name, null)) cname5,
               max(decode(position, 6, column_name, null)) cname6,
               max(decode(position, 7, column_name, null)) cname7,
               max(decode(position, 8, column_name, null)) cname8,
               count(*) col_cnt
          from (select substr(table_name, 1, 30) table_name,
                       substr(constraint_name, 1, 30) constraint_name,
                       substr(column_name, 1, 30) column_name,
                       position
                  from user_cons_columns) a,
               user_constraints b
         where a.constraint_name = b.constraint_name
           and b.constraint_type = 'R'
         group by b.table_name, b.constraint_name) cons
 where col_cnt > ALL
 (select count(*)
          from user_ind_columns i
         where i.table_name = cons.table_name
           and i.column_name in (cname1, cname2, cname3, cname4, cname5,
                cname6, cname7, cname8)
           and i.column_position <= cons.col_cnt
         group by i.index_name);


查询所有含外键的表

-- All tables that have foreign keys, with the column count per constraint.
-- Fixed: the statement was missing its terminating semicolon.
select count(*),TABLE_NAME,c_constraint_name from (
select a.table_name,       
       substr(a.constraint_name, 1, 30) c_constraint_name,
       substr(a.column_name, 1, 30) column_name,
       position,
       b.owner,
       b.constraint_name,
       b.constraint_type
  from user_cons_columns a, user_constraints b
 where a.constraint_name = b.constraint_name
   and b.constraint_type = 'R' )
   group by TABLE_NAME,c_constraint_name;

  监控失效分区索引:

prompt <p>查询当前用户下,失效-普通索引 
-- For a nonpartitioned index USER_INDEXES.STATUS is either VALID or
-- UNUSABLE.  Fixed: the original predicate used 'INVALID', which never
-- matches (see the truncate/exchange demos earlier in these notes, where a
-- broken global index shows STATUS = UNUSABLE).
select t.index_name,
       t.table_name,
       blevel,
       t.num_rows,
       t.leaf_blocks,
       t.distinct_keys
  from user_indexes t
where status = 'UNUSABLE';

prompt <p>查询当前用户下的失效-分区索引
-- Unusable partitions of partitioned indexes.  Partition-level status lives
-- in USER_IND_PARTITIONS (one row per index partition), not USER_INDEXES.
select t1.blevel,
       t1.leaf_blocks,
       t1.INDEX_NAME,
       t2.table_name,
       t1.PARTITION_NAME,
       t1.STATUS
  from user_ind_partitions t1, user_indexes t2
where t1.index_name = t2.index_name
   and t1.STATUS = 'UNUSABLE';

  监控未建分区的大表:

prompt <p>当前用户下,表大小超过10个GB未建分区的
-- Non-partitioned tables over 10 GB (segment_type = 'TABLE' excludes
-- 'TABLE PARTITION' segments, so only unpartitioned tables qualify).
-- Fixed: USER_SEGMENTS has no OWNER column (that caused ORA-00904); the
-- USER_* views are implicitly scoped to the current user.
select segment_name,
       segment_type,
       sum(bytes) / 1024 / 1024 / 1024 object_size
  from user_segments
WHERE segment_type = 'TABLE'
group by segment_name, segment_type
having sum(bytes) / 1024 / 1024 / 1024 >= 10
order by object_size desc;

  普通表转化成分区表的高效脚本:

create or replace package pkg_deal_part_tab
Authid Current_User
as

 ----------------------------------------------------------------------------------------------
/*
 Purpose: automate the conversion of an ordinary heap table into a
          range-partitioned table, partitioned by month on a DATE column.

 <2>. Parameters (of p_do, the public entry point):
    p_tab in varchar2,                 ----------------- name of the ordinary table to be partitioned
    p_struct_only                      ----------------- 0 = create the new table structure only; any non-zero value (e.g. 1) = also copy the data
    p_deal_flag in number default 0,   ----------------- 0 = do not execute, only record the generated scripts in part_tab_log.sql_text; 1 = record AND execute
    p_parallel in number default 4,    ----------------- degree of parallelism used during the conversion, default 4
    p_part_colum in varchar2,          ----------------- partition key column; this program targets the most common case: monthly range partitioning on a DATE column
    p_part_nums in number default 24,  ----------------- number of monthly partitions to create, default 24 starting from the current month
    p_tab_tablespace IN VARCHAR2,      ----------------- tablespace for the partitioned table's data
    p_idx_tablespace IN VARCHAR2       ----------------- tablespace for the partitioned table's indexes
 -----------------------------------------------------------------------------------------------------------------------------
 <3>. Notes:
     1. The table to be converted must contain a DATE column; that column is used as the partition key.
     2. PART_TAB_LOG records every operation (the table is created automatically by the program; no manual setup needed).
     3. When running in "script only" mode (p_deal_flag = 0), the generated DDL can be fetched and executed manually with:
     select sql_text||';' from part_tab_log t where tab_name='ts_real_datatrance' order by exec_order1,exec_order2  
     4. Ideally the ten sub-programs (001-010) called by p_main would succeed or fail together, as one transaction.
        Because they issue DDL statements (each with an implicit commit) this cannot be done -- a known limitation.
 --------------------------------------------------------------------------------------------------------------------------------
 <4>. Self-test script
  <4.1> Setup
  --- The T2 table below covers all the tricky properties: composite primary key, composite foreign key, composite index, etc.
    drop table t1 CASCADE CONSTRAINTS;
    drop table t2 CASCADE CONSTRAINTS;
    DROP TABLE T3 CASCADE CONSTRAINTS;
    drop table t2_&mmdd;
    CREATE TABLE T1 (ID1 NUMBER,ID2 NUMBER, PRIMARY KEY (ID1,ID2));
    CREATE TABLE T3 (ID1 NUMBER,  PRIMARY KEY (ID1));
    CREATE TABLE T2 (T2ID   NUMBER DEFAULT 8,
                     T1ID1  NUMBER NOT NULL,
                     T1ID2  NUMBER, 
                     deal_date date,
                     primary key (T2ID,T1ID1),
                     FOREIGN KEY (T1ID1,T1ID2) REFERENCES T1(ID1,ID2),
                     FOREIGN KEY (T2ID) REFERENCES T3(ID1));
    alter table T2 add constraint CKC_T2 check (T1ID1>=0);
    comment on column T2.T2ID is '标识';
    comment on table T2 is '分区测试表';
    create index idx_t1id2 on t2(t1id1);
    create unique index idx_t2id2 on t2(t1id2);
    -- seed data
    insert into t1 select rownum,rownum+1 from dual connect by level<=200;
    insert into t3 select rownum from dual connect by level<=200;
    insert into t2 select rownum,rownum+1,rownum+2,sysdate from dual connect by level<=100;
    commit;
   <4.2> Run (p_deal_flag = 0 records the scripts only, for manual execution; 1 records AND executes)
    set serveroutput on size 1000000
    exec pkg_deal_part_tab.p_do(p_tab => 'T2', p_deal_flag => 1, p_parallel => 4, p_part_colum => 'deal_date',p_part_nums=> 24,p_struct_only =>1 ,p_tab_tablespace => '&表空间名',p_idx_tablespace => '&索引空间名');         
-------------------------------------------------------------------------------------------  
*/

   -- Step 001: rename the original table to <name>_<mmdd> as a backup.
   procedure p_rename_001         (p_tab in varchar2 );   
   -- Step 002: CTAS a new range-partitioned table (single MAXVALUE partition).
   procedure p_ctas_002           (p_tab in varchar2,
                                   p_struct_only  in number,
                                   p_deal_flag in number,
                                   p_part_colum in varchar2,
                                   p_parallel in number default 4,
                                   p_tablespace IN VARCHAR2);                                  
   -- Step 003: split the MAXVALUE partition into monthly partitions.
   procedure p_split_part_003     (p_tab in varchar2,
                                   p_deal_flag in number,
                                   p_part_nums in number default 24,
                                   p_tab_tablespace IN VARCHAR2);                                   
   -- Step 004/005: carry over table and column comments from the backup.
   procedure p_tab_comments_004   (p_tab in varchar2,p_deal_flag in number); 
   procedure p_col_comments_005   (p_tab in varchar2,p_deal_flag in number); 
   -- Step 006: carry over column DEFAULT values.
   procedure p_defau_and_null_006 (p_tab in varchar2,p_deal_flag in number);
   -- Step 007: carry over CHECK (incl. NOT NULL) constraints.
   procedure p_check_007          (p_tab in varchar2,p_deal_flag in number );
   -- Step 008/009/010: carry over indexes, primary key, foreign keys.
   procedure p_index_008          (p_tab in varchar2,p_deal_flag in number,p_idx_tablespace IN VARCHAR2);
   procedure p_pk_009             (p_tab in varchar2,p_deal_flag in number,p_idx_tablespace IN VARCHAR2);
   procedure p_constraint_010     (p_tab in varchar2,p_deal_flag in number);
   /*procedure p_main               (p_tab in varchar2,
                                   p_deal_flag      in number default 0,
                                   p_parallel       in number default 4,
                                   p_part_colum     in varchar2,
                                   p_part_nums      in number default 24,
                                   p_struct_only    in number default 0,
                                   p_tab_tablespace IN VARCHAR2,
                                   p_idx_tablespace IN VARCHAR2);*/
   -- Public entry point: validates inputs and drives steps 001-010.
   procedure p_do                 (p_tab in varchar2,                
                                   p_deal_flag in number default 0,    
                                   p_parallel in number default 4,     
                                   p_part_colum in varchar2,           
                                   p_part_nums in number default 24,   
                                   p_struct_only in number default 0, 
                                   p_tab_tablespace IN VARCHAR2,       
                                   p_idx_tablespace IN VARCHAR2);

end pkg_deal_part_tab;
/
create or replace package body pkg_deal_part_tab
as
--YYYYMMDD      VARCHAR2(50) :=TO_CHAR(TRUNC(SYSDATE),'yyyymmdd'); --全局生效,很多过程需要引用
YYYYMMDD        VARCHAR2(50) :=TO_CHAR(TRUNC(SYSDATE),'mmdd'); --考虑到有的表名比较长,修改为mmdd

  procedure p_create_log_tab 
  as
  /*
   Purpose: create the PART_TAB_LOG operation-log table, into which every
   conversion step records the DDL it generates (via p_insert_log).
   Note: no existence check is done here; the caller (p_main) only invokes
   this when PART_TAB_LOG is absent.
  */
  v_sql_p_create_log_tab         varchar2(4000);
  begin
   v_sql_p_create_log_tab:=
    '  create table PART_TAB_LOG
     (
      TAB_NAME      VARCHAR2(200),
      DEAL_MODEL    VARCHAR2(200),
      SQL_TEXT      VARCHAR2(4000),
      DEAL_TIME     DATE,
      remark        VARCHAR2(4000),
      exec_order1   number,
      exec_order2   number
     )';
    execute immediate v_sql_p_create_log_tab;
    dbms_output.put_line('程序已经自动完成了PART_TAB_LOG表的建立');
  end p_create_log_tab;
  

  procedure p_insert_log(p_tab in varchar2,v_deal_model in varchar2,v_sql_text in varchar2,v_remark in varchar2 default null,v_exec_order1 number default 0,v_exec_order2 number default 0)
  as
  /*
   Purpose: write one PART_TAB_LOG row recording the SQL built by a step.
   Why dynamic SQL: PART_TAB_LOG is created at runtime by this package, so a
   static INSERT would not compile; the whole statement is assembled as text.
   Quoting: every single quote inside the logged SQL is doubled (the standard
   SQL escape) before being embedded in the INSERT literal.
   NOTE(review): SYSDATE is concatenated as text, so the DEAL_TIME value
   relies on an implicit conversion via the session NLS_DATE_FORMAT -- confirm
   this is acceptable (TO_DATE with an explicit format would be safer).
  */
  v_sql_p_insert_log         varchar2(4000);
  v_sql_text1                varchar2(4000);
  begin
     --insert into PART_TAB_LOG (TAB_NAME ,deal_model,SQL_TEXT,DEAL_TIME,REMARK,exec_order1,exec_order2)  values (p_tab,v_deal_model,v_SQL_text,SYSDATE,v_remark,v_exec_order1,v_exec_order2);
   v_sql_text1 := REPLACE(v_sql_text,'''',''''''); --- escape: double every embedded single quote
   v_sql_p_insert_log:= 'insert into PART_TAB_LOG (TAB_NAME ,deal_model,SQL_TEXT,DEAL_TIME,REMARK,exec_order1,exec_order2)  values ('||''''||p_tab||''''||','||''''||v_deal_model||''''||','||''''||v_SQL_text1||''''||','||''''||SYSDATE||''''||','||''''||v_remark||''''||','||v_exec_order1||','||v_exec_order2||')';
   
   --DBMS_OUTPUT.PUT_LINE( v_sql_p_insert_log);-- debug only
  /*
   Debug tip: DBMS_OUTPUT may fail with ORA-20000: ORU-10028 (line length
   overflow, 255 chars per line).  In that case create a scratch table with a
   single VARCHAR2(4000) column and insert the string into it instead:
   INSERT INTO TEST VALUES (v_sql_p_insert_log);
   COMMIT;
   */
   execute immediate v_sql_p_insert_log;
  
   commit;
  end p_insert_log;




  procedure p_if_judge(v_sql in varchar2,p_deal_flag in number )
  as
  /*
   Purpose: decide whether a previously built dynamic SQL statement is
   actually run.  p_deal_flag = 0 means "log only, do not execute"; any
   other value executes the statement via EXECUTE IMMEDIATE.
  */
  begin
   if p_deal_flag <> 0 then
      execute immediate(v_sql);
   end if;
  end p_if_judge;





  procedure p_rename_001 (p_tab in varchar2)
  as
  /*
   Purpose: back up the original table by renaming it to <name>_<mmdd>.
   Safety: if the backup name already exists, raise -20066 instead of
   silently clobbering an earlier backup.
   Fixed: typo "RENMAE" -> "RENAME" in the step-1 progress message.
  */
  V_CNT_RE_TAB  NUMBER(9) :=0;
  v_sql_p_rename         varchar2(4000);
  begin
  SELECT  COUNT(*)   INTO V_CNT_RE_TAB FROM user_objects where object_name=UPPER(P_TAB||'_'||YYYYMMDD);
    if V_CNT_RE_TAB=0 then
           v_sql_p_rename:= 'rename '||P_TAB ||' to '||P_TAB||'_'||YYYYMMDD;
       --   DBMS_OUTPUT.PUT_LINE(v_sql_p_rename); -- debug only
           p_insert_log(p_tab,'P_RENAME',v_sql_p_rename,'完成原表的重命名,改为_YYYYMMDD形式',1);
           -- The RENAME always runs for real (it is NOT gated by p_deal_flag):
           -- if only scripts are wanted, the caller renames the table back at
           -- the end.
           execute immediate(v_sql_p_rename);
    ELSE  
           RAISE_APPLICATION_ERROR(-20066,'备份表'||P_TAB||'_'||YYYYMMDD||'已存在,请先删除或重命名该备份表后再继续执行!');
         --  DBMS_OUTPUT.PUT_LINE('备份表'||P_TAB||'_'||YYYYMMDD||'已存在');
    end if;
    DBMS_OUTPUT.PUT_LINE('操作步骤1(备份原表)-------将'||p_tab ||' 表RENAME成 '||p_tab||'_'||YYYYMMDD||',并删除其约束索引等');
  end p_rename_001;





  procedure p_ctas_002 (p_tab in varchar2,
                        p_struct_only  in number,
                        p_deal_flag in number,
                        p_part_colum in varchar2,
                        p_parallel in number default 4,
                        p_tablespace IN VARCHAR2)
  as
   /*
   Purpose: CREATE TABLE AS SELECT from the renamed <name>_<mmdd> backup,
   producing an initial partitioned table with a single MAXVALUE partition.
   Speed: the CTAS runs NOLOGGING and PARALLEL; both attributes are restored
   (LOGGING / NOPARALLEL) afterwards.
   Fixed: the table-level PARALLEL degree was hard-coded to 4, ignoring
   p_parallel; the SELECT hint also referenced alias "t" that the generated
   FROM clause did not define, so the hint could not apply.
  */
  v_sql_p_ctas         varchar2(4000);
  begin
       v_sql_p_ctas:='create table '||p_tab 
       ||' partition by range ( '||p_part_colum||' ) ('
       || ' partition P_MAX  values less than (maxvalue))'||
       ' nologging parallel '||p_parallel||' tablespace '||p_tablespace||
       ' as select /*+parallel(t,'||p_parallel||')*/ *'||
       ' from '|| P_TAB||'_'||YYYYMMDD ||' t' ;
      -- p_struct_only = 0: structure only (where 1=2); otherwise copy data.
      if p_struct_only=0 then
        v_sql_p_ctas:=v_sql_p_ctas ||' where 1=2';
      else
        v_sql_p_ctas:=v_sql_p_ctas ||' where 1=1';
        end if;
       --DBMS_OUTPUT.PUT_LINE(v_sql_p_ctas); -- debug only
       p_insert_log(p_tab,'p_ctas',v_sql_p_ctas,'完成CTAS建初步分区表',2,1);
       p_if_judge(v_sql_p_ctas,p_deal_flag);

       -- Restore LOGGING so later DML is recoverable.
       v_sql_p_ctas:='alter table '|| p_tab ||' logging';
       p_insert_log(p_tab,'p_ctas',v_sql_p_ctas,'将新分区表修改回LOGGING属性',2,2);
       p_if_judge(v_sql_p_ctas,p_deal_flag);

       -- Restore NOPARALLEL so ordinary queries are not parallelized.
       v_sql_p_ctas:='alter table '|| p_tab || ' noparallel';
       p_insert_log(p_tab,'p_ctas',v_sql_p_ctas,'将新分区表修改回NOPARALLEL属性',2,3);
       p_if_judge(v_sql_p_ctas,p_deal_flag);
    DBMS_OUTPUT.PUT_LINE('操作步骤2(建分区表)-------通过CTAS的方式从 '||p_tab||'_'||YYYYMMDD|| ' 中新建'||p_tab ||'表,完成初步分区改造工作');
   end p_ctas_002;





  procedure p_split_part_003 (p_tab in varchar2,
                              p_deal_flag in number,
                              p_part_nums in number default 24,
                              p_tab_tablespace IN VARCHAR2)
  as
  /*
   Purpose: repeatedly SPLIT the single MAXVALUE partition created by
   p_ctas_002 into monthly partitions.  With the default p_part_nums = 24 the
   table ends up with 24 monthly partitions plus the trailing P_MAX.
   Boundaries: for loop index i, the split point is the first day of month
   (current + i), producing partition PART_<yyyymm> for month (current + i-1).
   So the FIRST named partition covers the CURRENT month (the original
   comment claimed the previous month, which does not match the code).
  */
  v_first_day   date;
  v_next_day    date;
  v_prev_day    date;
  v_sql_p_split_part         varchar2(4000);
  begin
   -- First day of the current month: the anchor for all split boundaries.
   select to_date(to_char(sysdate, 'yyyymm') || '01', 'yyyymmdd')
          into v_first_day
          from dual;
    for i in 1 .. p_part_nums loop
       select add_months(v_first_day, i) into v_next_day from dual;
       select add_months(v_next_day, -1) into v_prev_day from dual;
       -- SPLIT P_MAX at the upper boundary; the lower piece is named after
       -- the month it covers, the upper piece keeps the name p_MAX.
       v_sql_p_split_part := 'alter table '||p_tab||' split partition p_MAX at ' ||
               '(to_date(''' || to_char(v_next_day, 'yyyymmdd') ||
               ''',''yyyymmdd''))' || 'into (partition PART_' ||
               to_char(v_prev_day, 'yyyymm') || ' tablespace '|| p_tab_tablespace||' ,partition p_MAX)';
      -- DBMS_OUTPUT.PUT_LINE(v_sql_p_split_part); -- debug only
       p_insert_log(p_tab,'p_split_part',v_sql_p_split_part,'分区表完成分区SPLIT工作',3,i);
       p_if_judge(v_sql_p_split_part,p_deal_flag);
    end loop;
  DBMS_OUTPUT.PUT_LINE('操作步骤3(分区操作)-------对新建的'||p_tab ||'分区表完成分区SPLIT工作');
  end p_split_part_003;





  procedure p_tab_comments_004  (p_tab in varchar2,p_deal_flag in number)
  as
  /*
   Purpose: copy the table-level comment from the _<mmdd> backup table onto
   the new partitioned table.
   Fixed: a comment containing a single quote previously broke the generated
   COMMENT ON statement; embedded quotes are now doubled (standard escape).
  */
  v_sql_p_tab_comments         varchar2(4000);
  v_cnt number;
  begin
   select count(*) into v_cnt from user_tab_comments where table_name=UPPER(P_TAB)||'_'||YYYYMMDD AND COMMENTS IS NOT NULL;
   if v_cnt>0 then 
    for i in (select * from user_tab_comments where table_name=UPPER(P_TAB)||'_'||YYYYMMDD AND COMMENTS IS NOT NULL) loop
      v_sql_p_tab_comments:='comment on table '||p_tab||' is '|| ''''||replace(i.COMMENTS,'''','''''')||'''';
     -- DBMS_OUTPUT.PUT_LINE(v_sql_p_tab_comments); -- debug only
      p_insert_log(p_tab,'p_deal_comments',v_sql_p_tab_comments,'将新分区表的表的注释加上',4,1);
      p_if_judge(v_sql_p_tab_comments,p_deal_flag);
    end loop;
     DBMS_OUTPUT.PUT_LINE('操作步骤4(表的注释)-------对'||p_tab ||'表增加表名的注释内容');
    ELSE 
     DBMS_OUTPUT.PUT_LINE('操作步骤4(表的注释)-------'||UPPER(P_TAB)||'_'||YYYYMMDD ||'并没有表注释!');
    END IF;
  end p_tab_comments_004;


  procedure p_col_comments_005  (p_tab in varchar2,p_deal_flag in number)
  as
  /*
   Purpose: copy all column-level comments from the _<mmdd> backup table onto
   the new partitioned table.
   Fixed: a comment containing a single quote previously broke the generated
   COMMENT ON statement; embedded quotes are now doubled (standard escape).
  */
  v_sql_p_col_comments         varchar2(4000);
  v_cnt number;
  begin
    select count(*) into v_cnt from user_col_comments where table_name=UPPER(P_TAB)||'_'||YYYYMMDD AND COMMENTS IS NOT NULL;
    if v_cnt>0 then 
    for i in (select * from user_col_comments where table_name=UPPER(P_TAB)||'_'||YYYYMMDD AND COMMENTS IS NOT NULL) loop
      v_sql_p_col_comments:='comment on column '||p_tab||'.'||i.COLUMN_NAME||' is '|| ''''||replace(i.COMMENTS,'''','''''')||'''';
      p_insert_log(p_tab,'p_deal_col_comments',v_sql_p_col_comments,'将新分区表的列的注释加上',5,1);
      p_if_judge(v_sql_p_col_comments,p_deal_flag);
    end loop;
     DBMS_OUTPUT.PUT_LINE('操作步骤5(列的注释)-------对'||p_tab ||'表增加列名及字段的注释内容');
    else 
     DBMS_OUTPUT.PUT_LINE('操作步骤5(列的注释)-------'||UPPER(P_TAB)||'_'||YYYYMMDD ||'并没有列注释!');
    end if;
  end p_col_comments_005;



  procedure p_defau_and_null_006 (p_tab in varchar2,p_deal_flag in number)
  as
  /*
   Purpose: re-apply the column DEFAULT values of the _<mmdd> backup table to
   the new partitioned table (CTAS does not carry DEFAULTs over).
   Note: despite the name, NOT NULL handling is not done here -- NOT NULL
   constraints surface as CHECK constraints and are handled by p_check_007.
   NOTE(review): USER_TAB_COLUMNS.DATA_DEFAULT is a LONG; the implicit
   LONG-to-VARCHAR2 fetch works in PL/SQL but truncates very long defaults --
   confirm no default exceeds 4000 characters.
  */
  v_sql_defau_and_null        varchar2(4000);
  v_cnt  number;
  begin
  select count(*) into v_cnt  from user_tab_columns where table_name=UPPER(P_TAB)||'_'||YYYYMMDD and data_default is not null;
    if v_cnt>0 then 
    for i in (select * from user_tab_columns where table_name=UPPER(P_TAB)||'_'||YYYYMMDD and data_default is not null) loop
     v_sql_defau_and_null:='alter table '||p_tab||' modify '||i.COLUMN_NAME ||' default ' ||i.data_default;
     p_insert_log(p_tab,'p_deal_default',v_sql_defau_and_null,'将新分区表的默认值加上',6);
     p_if_judge(v_sql_defau_and_null,p_deal_flag);
    end loop;
     DBMS_OUTPUT.PUT_LINE('操作步骤6(空和默认)-------对'||p_tab ||'表完成默认DEFAULT值的增加');
    else
     DBMS_OUTPUT.PUT_LINE('操作步骤6(空和默认)-------'||UPPER(P_TAB)||'_'||YYYYMMDD ||'并没有DEFAULT或NULL值!');
    end if;
    
  end p_defau_and_null_006;




  procedure p_check_007 (p_tab in varchar2,p_deal_flag in number)
  as
  /*
   Purpose: move every CHECK constraint from the _<mmdd> backup table to the
   new partitioned table: drop it on the backup first (so the name is free),
   then recreate it with the same name on the new table.
   Why no separate NOT NULL pass: USER_CONSTRAINTS already exposes NOT NULL
   columns as type-'C' constraints, so code like the following is unnecessary:
    for i in (select * from user_tab_columns where table_name=UPPER(P_TAB)||'_'||YYYYMMDD and nullable='N') loop
     v_sql:='alter table '||p_tab||' modify '||i.COLUMN_NAME ||' not null';
   NOTE(review): SEARCH_CONDITION is a LONG; the implicit fetch into the loop
   record works in PL/SQL but truncates very long conditions -- confirm none
   exceed 4000 characters.
  */
   v_sql_p_check         varchar2(4000);
   v_cnt number;
  begin
  select count(*) into v_cnt from user_constraints where table_name=UPPER(P_TAB)||'_'||YYYYMMDD and constraint_type='C';
   if v_cnt>0 then 
   for i in (select * from user_constraints where table_name=UPPER(P_TAB)||'_'||YYYYMMDD and constraint_type='C') loop
      -- Drop from the backup first so the constraint name can be reused.
      v_sql_p_check :='alter table '||P_TAB||'_'||YYYYMMDD ||' drop constraint ' || I.CONSTRAINT_NAME;
      p_insert_log(p_tab,'p_deal_check',v_sql_p_check ,'将备份出来的原表的CHECK删除',7,1);
      p_if_judge(v_sql_p_check ,p_deal_flag);
      v_sql_p_check :='alter table '||p_tab||' ADD CONSTRAINT '||I.CONSTRAINT_NAME||' CHECK ('||I.SEARCH_CONDITION ||')' ;
      p_insert_log(p_tab,'p_deal_check',v_sql_p_check ,'将新分区表的CHECK加上',7,2);
      p_if_judge(v_sql_p_check ,p_deal_flag);
    end loop;
     DBMS_OUTPUT.PUT_LINE('操作步骤7(check约束)-------对'||p_tab ||'完成CHECK的约束');
    else 
     DBMS_OUTPUT.PUT_LINE('操作步骤7(check约束)-----'||UPPER(P_TAB)||'_'||YYYYMMDD ||'并没有CHECK!');
    end if;
    
  end p_check_007;

   procedure p_index_008 (p_tab in varchar2,p_deal_flag in number,p_idx_tablespace IN VARCHAR2)
  as
  /*
   Purpose: recreate the ordinary (NORMAL) indexes of the _<mmdd> backup on
   the new partitioned table, dropping each from the backup first.
   Constraint-backing indexes (e.g. the primary key) are excluded here and
   handled by p_pk_009; function-based indexes are not supported.
   Composite indexes: the column list is rebuilt with SYS_CONNECT_BY_PATH.
   Fixed: the rebuild previously ordered columns by COLUMN_NAME, silently
   recreating composite indexes with their columns in ALPHABETICAL order;
   it now orders by USER_IND_COLUMNS.COLUMN_POSITION, the real index order.
   NOTE(review): non-unique indexes are recreated LOCAL, unique ones global --
   presumably because a unique local index must contain the partition key;
   confirm this is the intended trade-off.
  */
   v_sql_p_normal_idx         varchar2(4000);
   v_cnt number;
   
  begin
     SELECT count(*) into v_cnt
      from user_indexes
      where table_name = UPPER(P_TAB)||'_'||YYYYMMDD 
      and index_type='NORMAL' AND INDEX_NAME NOT IN (SELECT CONSTRAINT_NAME FROM USER_CONSTRAINTS);
  if v_cnt>0 then 
    for i in
    (
      WITH T AS 
      (
      select C.*,I.UNIQUENESS
      from user_ind_columns C
      ,(SELECT DISTINCT index_name,UNIQUENESS
      from user_indexes
      where table_name = UPPER(P_TAB)||'_'||YYYYMMDD 
      and index_type='NORMAL' 
      AND INDEX_NAME NOT IN
      (SELECT CONSTRAINT_NAME FROM USER_CONSTRAINTS)
      ) i
      where c.index_name = i.index_name
      )
      SELECT INDEX_NAME,TABLE_NAME,UNIQUENESS, MAX(substr(sys_connect_by_path(COLUMN_NAME, ','), 2)) str --- composite-index column list
      FROM (SELECT column_name,INDEX_NAME,TABLE_NAME, row_number() over(PARTITION BY INDEX_NAME,TABLE_NAME ORDER BY COLUMN_POSITION) rn
      ,UNIQUENESS
      FROM T) t
      START WITH rn = 1
      CONNECT BY rn = PRIOR rn + 1
      AND INDEX_NAME = PRIOR INDEX_NAME
      GROUP BY INDEX_NAME,T.TABLE_NAME,UNIQUENESS
) loop
      -- Drop the index from the backup first so the name can be reused.
      v_sql_p_normal_idx:= 'drop index '||i.index_name;
      p_insert_log(p_tab,'p_deal_normal_idx',v_sql_p_normal_idx,'删除原表索引',8,1);
      p_if_judge(v_sql_p_normal_idx,p_deal_flag);
      DBMS_OUTPUT.PUT_LINE('操作步骤8(处理索引)-------将'||i.table_name ||'的'||i.str||'列的索引'||i.index_name||'删除完毕');
       if i.uniqueness='UNIQUE' then
         v_sql_p_normal_idx:='CREATE UNIQUE INDEX ' || i.INDEX_NAME || ' ON '|| p_tab||'('||i.STR||')'||' tablespace '||p_idx_tablespace ;
        elsif i.uniqueness='NONUNIQUE' then
         v_sql_p_normal_idx:='CREATE  INDEX ' || i.INDEX_NAME || ' ON '|| p_tab ||'('||i.STR||')'||' LOCAL tablespace '||p_idx_tablespace ;
        end if;
      p_insert_log(p_tab,'p_deal_normal_idx',v_sql_p_normal_idx,'将新分区表的索引加上',8,2);
      p_if_judge(v_sql_p_normal_idx,p_deal_flag);
      DBMS_OUTPUT.PUT_LINE('操作步骤8(处理索引)-------对'||p_tab ||'新分区表'||i.STR||'列增加索引'||i.index_name);
    end loop;
   else 
     DBMS_OUTPUT.PUT_LINE('操作步骤8(处理索引)-------'||UPPER(P_TAB)||'_'||YYYYMMDD ||'并没有索引(索引模块并不含主键判断)!');
   end if;
  
  end p_index_008;
  
  procedure p_pk_009 (p_tab in varchar2,p_deal_flag in number,p_idx_tablespace IN VARCHAR2)
  as
  /*
   Purpose: move the primary key of the _<mmdd> backup table onto the new
   partitioned table: drop it (with CASCADE) on the backup first, then add a
   PK of the same name on the new table, placing its index in
   p_idx_tablespace.
   Composite keys: the column list is rebuilt with SYS_CONNECT_BY_PATH.
   Fixed: the rebuild previously ordered columns by COLUMN_NAME, which put a
   composite primary key's columns in alphabetical rather than positional
   order; it now orders by USER_IND_COLUMNS.COLUMN_POSITION.
  */
  v_sql_p_pk         varchar2(4000);
  v_cnt              number;
  
  begin
  SELECT count(*) into v_cnt
          from USER_IND_COLUMNS
          where index_name in (select index_name
                                from sys.user_constraints t
                               WHERE TABLE_NAME =UPPER(P_TAB)||'_'||YYYYMMDD
                                 and constraint_type = 'P');
  if v_cnt>0 then 
    for i in
    (WITH T AS
     (SELECT INDEX_NAME,TABLE_NAME,COLUMN_NAME,COLUMN_POSITION
          from USER_IND_COLUMNS
          where index_name in (select index_name
                                from sys.user_constraints t
                               WHERE TABLE_NAME =UPPER(P_TAB)||'_'||YYYYMMDD
                                 and constraint_type = 'P')
      )
     SELECT INDEX_NAME,TABLE_NAME, MAX(substr(sys_connect_by_path(COLUMN_NAME, ','), 2)) str
    FROM (SELECT  column_name,INDEX_NAME,TABLE_NAME, row_number() over(PARTITION BY INDEX_NAME,TABLE_NAME ORDER BY COLUMN_POSITION) rn
          FROM T) t
    START WITH rn = 1
    CONNECT BY rn = PRIOR rn + 1
    AND INDEX_NAME = PRIOR INDEX_NAME
    GROUP BY INDEX_NAME,T.TABLE_NAME
) loop
      v_sql_p_pk:= 'alter table '||i.table_name||' drop constraint '||i.index_name|| ' cascade';
      p_insert_log(p_tab,'p_deal_pk',v_sql_p_pk,'将备份出来的原表的主键删除',9,1);
      p_if_judge(v_sql_p_pk,p_deal_flag);
      DBMS_OUTPUT.PUT_LINE('操作步骤9(处理主键)-------将备份出来的原表'||i.table_name||'的'||i.str||'列的主键'||i.index_name||'删除完毕!'); --- the loop runs once per table: there is only one primary key
      v_sql_p_pk:='ALTER TABLE '||p_tab||' ADD CONSTRAINT '||I.INDEX_NAME||' PRIMARY KEY ('||I.STR||')' ||' using index tablespace '||p_idx_tablespace ;
      p_insert_log(p_tab,'p_deal_pk',v_sql_p_pk,'将新分区表的主键加上',9,2);
      p_if_judge(v_sql_p_pk,p_deal_flag);
      DBMS_OUTPUT.PUT_LINE('操作步骤9(处理主键)-------对'||p_tab ||'表的'||i.str||'列增加主键'||i.index_name); --- single iteration, see above
    end loop;
   else 
      DBMS_OUTPUT.PUT_LINE('操作步骤9(处理主键)-------'||UPPER(P_TAB)||'_'||YYYYMMDD ||'并没有主键!');
   end if;
  
  end p_pk_009;



  procedure p_constraint_010 (p_tab in varchar2,p_deal_flag in number)
  as
  /*
   Purpose: move the foreign keys of the _<mmdd> backup table onto the new
   partitioned table: drop each FK from the backup, then add a same-named FK
   on the new table referencing the original parent table/columns.
   Tricky part: composite foreign keys referencing composite parent keys --
   t1 collects the FK's own columns, t2 collects the referenced (parent)
   columns matched by position, t3 pairs them, and SYS_CONNECT_BY_PATH walks
   the positions to build the ordered FK and PK column lists.
  */
  v_sql_p_constraint         varchar2(4000);
  v_cnt  number;
  begin
  SELECT count(*) into v_cnt  FROM user_constraints where table_name=UPPER(P_TAB)||'_'||YYYYMMDD AND CONSTRAINT_TYPE='R';
  if v_cnt>0 then
    for i in
    (with t1 as (
      -- Columns of each FK constraint on the backup table, by position.
      SELECT  /*+no_merge */ 
             POSITION
            ,t.owner,t.constraint_name as constraint_name1,t.table_name as table_name1 ,t.column_name as column_name1  FROM user_cons_columns t where constraint_name in
      (
      SELECT CONSTRAINT_NAME FROM user_constraints where table_name=UPPER(P_TAB)||'_'||YYYYMMDD AND CONSTRAINT_TYPE='R'
      )
      ),
    t2 as (
      -- Referenced (parent) key columns for each FK, by position; the max
      -- position marks the last column of a composite key.
      SELECT  /*+no_merge */ 
             t.POSITION
            ,c.constraint_name constraint_name1
            ,t.constraint_name as constraint_name2,t.table_name as table_name2 ,t.column_name as column_name2
            ,MAX(t.POSITION) OVER (PARTITION BY c.constraint_name) MAX_POSITION 
        FROM user_cons_columns t 
            ,user_constraints c
        WHERE c.table_name = UPPER(P_TAB)||'_'||YYYYMMDD
              AND t.constraint_name = c.r_constraint_name
              AND c.constraint_type='R'
      ),
    t3 AS (
      -- Pair FK column and referenced column at each position.
      SELECT t1.* 
            ,t2.constraint_name2
            ,t2.table_name2 
            ,t2.column_name2
            ,t2.max_position 
      FROM t1,t2 
      WHERE t1.constraint_name1 = t2.constraint_name1 AND t1.position=t2.position)
    -- Walk positions 1..max to build the ordered column lists for both sides.
    select t3.*,SUBSTR(SYS_CONNECT_BY_PATH(column_name1,','),2) as FK,SUBSTR(SYS_CONNECT_BY_PATH(column_name2,','),2) AS PK from t3 
     WHERE POSITION=MAX_POSITION
     START WITH position=1
     CONNECT BY constraint_name1 = PRIOR constraint_name1
               AND position = PRIOR position+1) loop
    -- Drop the FK from the backup first so the constraint name can be reused.
    v_sql_p_constraint:= 'alter table '||p_tab||'_'||YYYYMMDD  ||' drop constraint '||i.constraint_name1;
    p_insert_log(p_tab,'p_deal_constraint',v_sql_p_constraint,'删除原表FK外键' ,10,1);
    p_if_judge(v_sql_p_constraint,p_deal_flag);
    DBMS_OUTPUT.PUT_LINE('操作步骤10(处理外键)------将备份出来的'||i.table_name1||'表的'||i.column_name1||'列的外键'||i.constraint_name1||'删除完毕!');
    v_sql_p_constraint:= 'alter table ' || p_tab ||' add constraint '||i.constraint_name1 || ' foreign key ( '
    ||i.fk||') references '||i.table_name2|| ' ('||i.pk||' )';
    p_insert_log(p_tab,'p_deal_constraint',v_sql_p_constraint,'将新分区表的外键加上',10,2);
    p_if_judge(v_sql_p_constraint,p_deal_flag);
    DBMS_OUTPUT.PUT_LINE('操作步骤10(处理外键)------对'||p_tab ||'表的'||i.column_name1||'列增加外键'||i.constraint_name1);
    end loop;
   else 
    DBMS_OUTPUT.PUT_LINE('操作步骤10(处理外键)------'||UPPER(P_TAB)||'_'||YYYYMMDD ||'并没有外键!');
   end if;
    
  end p_constraint_010;

 
  procedure p_main (p_tab in varchar2,                 --name of the table to be converted to a partitioned table
                    p_deal_flag in number default 0,   --0 = do not execute, only record the scripts into part_tab_log.sql_text; 1 = record AND execute
                    p_parallel in number default 4,    --degree of parallelism
                    p_part_colum in varchar2,          --column to partition on (time-range partitioning)
                    p_part_nums in number default 24,  --number of partitions to create
                    p_struct_only in number default 0, --whether the new table gets structure only: 0 = structure only, non-zero (e.g. 1) = also load the data
                    p_tab_tablespace IN VARCHAR2,      --tablespace for the partitioned table's data
                    p_idx_tablespace IN VARCHAR2)      --tablespace for the partitioned table's indexes
  as
  /*
   Purpose: main driver — calls the numbered sub-procedures (001..010) in order.
   Notes:  1. supports both "create structure only" and "create partitioned table and load data";
           2. supports both "log the scripts for later manual execution" and "execute immediately";
           3. the partition column must be a DATE column, since this program does
              time-based range partitioning only.
   Raises: NO_DATA_FOUND if p_part_colum is not a column of p_tab;
           ORA-20066 if the partition column is not of type DATE.
  */
  v_sql_p_main               varchar2(4000);
  v_data_type                varchar2(40);
  v_cnt_tab_log              number;  -- holds COUNT(*); was varchar2(40), which only worked via implicit conversion
  begin

      -- Dictionary stores unquoted identifiers in upper case, hence UPPER() on both names.
      select DATA_TYPE into v_data_type from user_tab_columns where table_name=upper(p_tab) and column_name=upper(p_part_colum);--used below to check that the partition column is a DATE (note the upper-casing)
      SELECT COUNT(*) INTO v_cnt_tab_log FROM USER_TABLES WHERE TABLE_NAME='PART_TAB_LOG';
       IF v_data_type ='DATE' THEN ---check that the supplied partition column is a date type
        if v_cnt_tab_log=0 then
        p_create_log_tab;---create the log table when it does not exist yet
        end if;
        -- Clear earlier log entries for this table. Bind variable instead of the
        -- original string-concatenated literal: safe against quoting problems and
        -- lets Oracle share the cursor.
        v_sql_p_main :='delete from part_tab_log where tab_name=:tab_name';
        --DBMS_OUTPUT.PUT_LINE(v_sql_p_main);--for debugging
        execute immediate v_sql_p_main using p_tab;
        commit;
        p_rename_001         (p_tab); ---first RENAME the original table to a name suffixed with YYYYMMDD
        p_ctas_002           (p_tab, p_struct_only ,p_deal_flag, p_part_colum,p_parallel ,p_tab_tablespace ); ---CTAS-create the partitioned table with its initial partitions
        p_split_part_003     (p_tab,p_deal_flag,p_part_nums ,p_tab_tablespace); ---split until the partition layout is complete
        p_tab_comments_004   (p_tab,p_deal_flag);  --add the table comment to the partitioned table
        p_col_comments_005   (p_tab,p_deal_flag);  --add the column comments to the partitioned table
        p_defau_and_null_006 (p_tab,p_deal_flag);  --add the column defaults to the partitioned table
        p_check_007          (p_tab,p_deal_flag);  --add the CHECK constraints to the partitioned table
        p_index_008          (p_tab,p_deal_flag,p_idx_tablespace); --add the indexes to the partitioned table
        p_pk_009             (p_tab,p_deal_flag,p_idx_tablespace);  --add the primary key to the partitioned table
        p_constraint_010     (p_tab,p_deal_flag );---add the foreign keys
        if p_deal_flag=0 then
        DBMS_OUTPUT.PUT_LINE('请特别留意!!!,以上只是对'||p_tab ||'进行生成脚本动作,未真正执行分区改造等执行动作,表也未真正执行RENAME动作,具体脚本可从part_tab_log中获取从而手工执行,具体如下:');
        dbms_output.put_Line('select sql_text'||'||'||''';'''||' from part_tab_log t where tab_name='||''''||p_tab||''''||' order by exec_order1,exec_order2;');  
        DBMS_OUTPUT.PUT_LINE('如果需要真正完成执行动作,请将pkg_deal_part_tab.p_main的 p_deal_flag参数值设置为非0值,如1');
        execute immediate 'rename '||P_TAB||'_'||YYYYMMDD ||' to '||P_TAB; --even in script-only mode the earlier RENAME had to really happen (much of the dictionary data is read from the _YYYYMMDD table), so rename it back here at the end
        ELSE
        DBMS_OUTPUT.PUT_LINE('操作步骤结束----对'||p_tab ||'已经真正完成了操作分区改造等执行步骤,脚本可参看part_tab_log表');       
        end IF;
       ELSE 
        RAISE_APPLICATION_ERROR(-20066,'分区字段'||p_part_colum||'必须是时间字段');
    END IF;
   end p_main;

 

  
  procedure p_do   (p_tab            in varchar2,                 --name of the table to be converted to a partitioned table
                    p_deal_flag      in number   default 0,       --0 = do not execute, only record the scripts into part_tab_log.sql_text; 1 = record AND execute
                    p_parallel       in number   default 4,       --degree of parallelism
                    p_part_colum     in varchar2,                 --column to partition on (time-range partitioning)
                    p_part_nums      in number   default 24,      --number of partitions to create
                    p_struct_only    in number   default 0,       --whether the new table gets structure only: 0 = structure only, non-zero (e.g. 1) = also load the data
                    p_tab_tablespace IN VARCHAR2,                 --tablespace for the partitioned table's data
                    p_idx_tablespace IN VARCHAR2)                 --tablespace for the partitioned table's indexes
  as
  /*
   Purpose: thin wrapper around p_main that validates the table name, the data
   tablespace and the index tablespace before anything runs. The 001-010 steps
   inside p_main issue DDL (each an implicit commit), so they cannot share one
   transaction — validate everything up front, then call p_main.
   Raises: ORA-20067 if the index tablespace does not exist;
           ORA-20068 if the data tablespace does not exist;
           ORA-20069 if the table does not exist.
  */

  -- All three hold COUNT(*) results; the tablespace counters were varchar2(40)
  -- originally and only worked through implicit number-to-string conversion.
  v_cnt_p_tab                number;
  v_cnt_tab_tablespace       number;
  v_cnt_idx_tablespace       number;
  begin
   select count(*) into v_cnt_p_tab from user_tables where table_name=upper(p_tab);
  select count(*) into v_cnt_tab_tablespace from user_tablespaces where tablespace_name=upper(p_tab_tablespace);
  select count(*) into v_cnt_idx_tablespace from user_tablespaces where tablespace_name=upper(p_idx_tablespace);
  
  if v_cnt_p_tab>0  then 
    if  v_cnt_tab_tablespace>0 then 
       if v_cnt_idx_tablespace>0 then
         p_main(p_tab,p_deal_flag, p_parallel,p_part_colum,p_part_nums, p_struct_only,p_tab_tablespace,p_idx_tablespace);  
       else
         RAISE_APPLICATION_ERROR(-20067,'输入的索引表空间'||p_idx_tablespace||'不存在,请仔细检查输入的索引表空间名');
       end if; 
    else
       RAISE_APPLICATION_ERROR(-20068,'输入的数据表空间'||p_tab_tablespace||'不存在,请仔细检查输入的数据表空间名');
    end if;
  else 
     RAISE_APPLICATION_ERROR(-20069,'输入参数的表名'||p_tab||'不存在,请仔细检查表名');
  end if;
end p_do;



end pkg_deal_part_tab;
/

  

原文地址:https://www.cnblogs.com/sunliyuan/p/12307666.html