測試環境基本信息:
OS:Windows XP sp3
DB:Oracle 9.2.0.1 未啓用歸檔
DB重做日誌文件大小:100MB
硬盤型號:SAMSUNG HD161GJ(SATA-300,160G,7200rpm,8M cache)
CPU:Intel Core2 E8400(3.0G)
內存:2G
通過HD Tune得到的硬盤基本測試信息:
IOPS:66
讀取:90MB/s
寫入:82MB/s
測試表腳本:
-- Benchmark target table (measured at roughly 134 bytes/row later in the article).
-- NOTE(review): there is no primary key or index here — any index on ID would add
-- maintenance cost to every insert and skew the benchmark timings; presumably this
-- is deliberate, but confirm before reusing this DDL elsewhere.
create table T_EMPLOYEE
(
  ID            NUMBER(10)    not null,
  NAME          VARCHAR2(20)  not null,
  CREATE_DATE   DATE          default sysdate not null,
  BIRTHDAY      DATE,
  ADDRESS       VARCHAR2(200),
  EMAIL         VARCHAR2(200),
  MOBILEPHONE   VARCHAR2(11),
  TELEPHONE     VARCHAR2(20),
  IDENTITY_CARD VARCHAR2(18),
  WEIGHT        NUMBER,
  HEIGHT        NUMBER
);  -- original was missing the statement terminator
插入10萬條記錄,採用Java JDBC方式的測試結果如下(單位:秒):
執行方式                        | OCI   | OCI   | thin  | thin
--------------------------------|-------|-------|-------|------
Statement(test1)                | 69.84 | 67.03 | 66.96 | 42.81
PreparedStatement(test2)        | 40    | 37.18 | 39.21 | 12.66
PreparedStatement Batch(test3)  | 51.72 | 50.78 | 2.81  | 2.81
從測試結果可以看出,採用thin連接方式 batch插入的性能最好,而採用oci的batch插入性能未得到提高,也可能是bug。
記得以前在ORACLE文檔裏說採用oci模式的性能最好,9i中經過測試完全不成立,我想可能是以前java本身性能的問題,現在java語言的性能已經非常好了,thin連接方式不管是從管理還性能方面來說都是首選了。
注:採用oci的batch插入性能未得到提高確實是BUG,後來把ORACLE客戶端升級到9.2.0.8,花的時間只要6s,但還是比thin的方式差。
以下是測試程序源碼:
import java.sql.*;
import java.util.Calendar;
public class inserttest {

    // Rows inserted by each test. The article benchmarks 100,000-row (10萬條) loads,
    // but the original loops ran "i < 10000" and inserted only 9,999 rows — fixed here.
    private static final int ROW_COUNT = 100000;

    // Parameterized INSERT shared by test2/test3 (the original duplicated this text).
    // Note "\n" — the published listing had "/n", which concatenates to the invalid
    // SQL "...height)/nvalues(..." and cannot have run as printed.
    private static final String INSERT_SQL =
            "insert into t_employee(id,name,birthday,address,email,mobilephone,telephone,identity_card,weight,height)\n"
            + "values(seq_t_employee_id.nextval,'張三'||?,sysdate - ?,"
            + "'上海市南京東路11號203室'||?,"
            + "'abcd'||?||'@gmail.com',"
            + "'138'|| trim(to_char(?, '00000000')),"
            + "'021-'|| trim(to_char(?, '00000000')),"
            + "'3504561980' || trim(to_char(?, '00000000')),"
            + "64,1.72)";

    /**
     * test1: one dynamically concatenated INSERT per row, executed with a plain
     * Statement — every row costs a hard parse, which is why this is the slowest
     * variant in the results table.
     *
     * @param iConn open JDBC connection; the caller owns commit/close
     * @throws SQLException on any database error
     */
    public static void test1(Connection iConn) throws SQLException {
        Statement stmt = iConn.createStatement();
        try {
            for (int i = 1; i <= ROW_COUNT; i++) {
                String n = Integer.toString(i);
                String vSQL = "insert into t_employee(id,name,birthday,address,email,mobilephone,telephone,identity_card,weight,height)\n"
                        + "values(seq_t_employee_id.nextval,'張三" + n + "',sysdate - " + n + ","
                        + "'上海市南京東路11號203室" + n + "',"
                        + "'abcd" + n + "@gmail.com',"
                        + "'138'|| trim(to_char(" + n + ", '00000000')),"
                        + "'021-'|| trim(to_char(" + n + ", '00000000')),"
                        + "'3504561980' || trim(to_char(" + n + ", '00000000')),"
                        + "64,1.72)";
                stmt.executeUpdate(vSQL);
            }
        } finally {
            stmt.close(); // the original leaked the Statement
        }
    }

    /**
     * test2: the same INSERT, parsed once as a PreparedStatement and executed
     * once per row with bound parameters.
     *
     * @param iConn open JDBC connection; the caller owns commit/close
     * @throws SQLException on any database error
     */
    public static void test2(Connection iConn) throws SQLException {
        PreparedStatement ps = iConn.prepareStatement(INSERT_SQL);
        try {
            for (int i = 1; i <= ROW_COUNT; i++) {
                bindRow(ps, i);
                ps.executeUpdate();
            }
        } finally {
            ps.close();
        }
    }

    /**
     * test3: same PreparedStatement, but rows are queued with addBatch() and sent
     * in a single executeBatch() round trip — the fastest JDBC variant measured.
     * NOTE(review): one 100,000-row batch is held entirely in client memory;
     * flushing every few thousand rows would bound memory use, but the single
     * batch is kept to match the original benchmark's behavior.
     *
     * @param iConn open JDBC connection; the caller owns commit/close
     * @throws SQLException on any database error
     */
    public static void test3(Connection iConn) throws SQLException {
        PreparedStatement ps = iConn.prepareStatement(INSERT_SQL);
        try {
            for (int i = 1; i <= ROW_COUNT; i++) {
                bindRow(ps, i);
                ps.addBatch();
            }
            ps.executeBatch();
        } finally {
            ps.close();
        }
    }

    // Binds the seven positional parameters of INSERT_SQL for row i.
    private static void bindRow(PreparedStatement ps, int i) throws SQLException {
        String n = Integer.toString(i);
        ps.setString(1, n); // name suffix ('張三' || ?)
        ps.setString(2, n); // days subtracted from sysdate for birthday
        ps.setString(3, n); // address suffix
        ps.setString(4, n); // e-mail local-part suffix
        ps.setInt(5, i);    // mobilephone digits
        ps.setInt(6, i);    // telephone digits
        ps.setInt(7, i);    // identity_card digits
    }

    /**
     * Entry point: connects (OCI by default, thin URL left as a comment), runs one
     * test variant inside a manual transaction, and prints the elapsed time.
     */
    public static void main(String[] args) throws ClassNotFoundException, SQLException {
        Class.forName("oracle.jdbc.driver.OracleDriver");
        // Thin-driver alternative used for the "thin" columns of the results table:
        //Connection conn = DriverManager.getConnection("jdbc:oracle:thin:@127.0.0.1:1521:mydb", "yzs", "yzs");
        Connection conn = DriverManager.getConnection("jdbc:oracle:oci8:@mydb", "yzs", "yzs");
        try {
            conn.setAutoCommit(false);
            long start = System.currentTimeMillis();
            test2(conn); // switch to test1/test3 to benchmark the other variants
            conn.commit(); // commit inside the timed region so its cost is measured
            System.out.println("es:" + (System.currentTimeMillis() - start) + "ms");
        } finally {
            conn.close(); // the original never closed the connection on error
        }
    }
}
採用服務器PL/SQL 方式插入10萬條記錄的測試結果如下:
注:t_e1與t_employee同樣的表結構
執行方式                                  | 說明                              | 運行時間(單位:秒)
------------------------------------------|-----------------------------------|-------------------
pl/sql insert(腳本1)                      | 普通insert                        | 3.203
pl/sql forall insert(腳本2)               | 從一個表BULK COLLECT INTO到目標表 | 0.578
insert into select *(腳本3)               | 使用insert into select方式插入    | 0.156
insert /*+ append*/ into select *(腳本4)  | 加append hint的插入               | 0.234
從測試結果分析,採用insert into select 的方式最快,只要0.156s,根據數據量統計,平均每行大小爲134字節,總共插入數據量爲134*100000=12.78MB,可得每秒約插入81MB的數據,基本上達到了硬盤的上限。
而採用append hint插入反而更慢,從同事討論結果得到,採用append的insert會採用direct-path插入,因此數據會直接寫入數據文件,所以消耗的時間更多。
--------------------------------------------腳本1--------------
-- Script 1: baseline row-by-row PL/SQL insert of 100,000 employees (3.203 s).
-- The original declared "i integer;" in a DECLARE section, but a numeric FOR
-- loop declares its own loop counter, so that variable was dead code — removed.
begin
  for i in 1 .. 100000 loop
    insert into t_employee
      (id,
       name,
       birthday,
       address,
       email,
       mobilephone,
       telephone,
       identity_card,
       weight,
       height)
    values
      (seq_t_employee_id.nextval,
       '張三' || i,
       sysdate - i,
       '上海市南京東路11號203室' || i,
       'abcd' || i || '@gmail.com',
       '138' || trim(to_char(i, '00000000')),
       '021-' || trim(to_char(i, '00000000')),
       '3504561980' || trim(to_char(i, '00000000')),
       64,
       1.72);
  end loop;
  commit;
end;
--------------------------------------------腳本1--------------
--------------------------------------------腳本2--------------
-- Script 2: set-oriented copy — fetch every t_employee row into a PL/SQL
-- collection, then bulk-bind the whole collection into t_e1 with FORALL (0.578 s).
DECLARE
    TYPE emp_tab_t IS TABLE OF t_employee%ROWTYPE;
    l_rows emp_tab_t;
BEGIN
    -- Single fetch of the entire source table into session (PGA) memory.
    SELECT * BULK COLLECT INTO l_rows FROM t_employee;
    -- One bulk DML execution instead of l_rows.COUNT single-row inserts.
    FORALL i IN 1 .. l_rows.COUNT
        INSERT INTO t_e1 VALUES l_rows (i);
    COMMIT;
END;
--------------------------------------------腳本2--------------
--------------------------------------------腳本3--------------
-- Script 3: pure SQL insert-select — fastest variant in the benchmark (0.156 s).
-- Explicit column lists replace "select *" / bare INSERT so the statement no
-- longer breaks silently if either table's column order ever diverges.
insert into t_e1
  (id, name, create_date, birthday, address, email,
   mobilephone, telephone, identity_card, weight, height)
select id, name, create_date, birthday, address, email,
       mobilephone, telephone, identity_card, weight, height
  from t_employee;
--------------------------------------------腳本3--------------
--------------------------------------------腳本4--------------
-- Script 4: same insert-select with a direct-path (APPEND hint) load (0.234 s).
-- Explicit column lists for the same schema-safety reason as script 3; the hint
-- text is preserved exactly so the optimizer still recognizes it.
insert /*+ append*/ into t_e1
  (id, name, create_date, birthday, address, email,
   mobilephone, telephone, identity_card, weight, height)
select id, name, create_date, birthday, address, email,
       mobilephone, telephone, identity_card, weight, height
  from t_employee;
--------------------------------------------腳本4-------------