Skip to content

PL/SQL failure when upserting CLOBs #78

@joboon

Description

@joboon

When using a CLOB and processing an upsert where the field is longer than 4000 characters, the update fails. I suspect this is because string fields are treated as a TABLE OF VARCHAR2(4000) regardless of the content's actual length.

Here is a contrived example that causes the problem:

package main

import (
	"fmt"

	"github.com/google/uuid"
	"github.com/oracle-samples/gorm-oracle/oracle"
	"gorm.io/gorm"
)

// FolderData is the parent record; it owns a collection of
// FolderProperty child rows joined on folder_id.
type FolderData struct {
	ID         string           `gorm:"primaryKey;column:folder_id"`
	Name       string           `gorm:"column:folder_nm"`
	Properties []FolderProperty `gorm:"foreignKey:ID;PRELOAD:false"` // child rows; not preloaded by default
}

// TableName maps FolderData to the folder_data table.
func (FolderData) TableName() string {
	const name = "folder_data"
	return name
}

// FolderProperty is a key/value child row of FolderData. Value maps to
// a CLOB column, which is the field that triggers the ORA-06502 failure
// described in this issue once its content exceeds 4000 characters.
type FolderProperty struct {
	Blah  uint64 `gorm:"column:blah;autoIncrement"` // identity column in the DDL
	ID    string `gorm:"primaryKey;column:folder_id"`
	Key   string `gorm:"primaryKey;unique;column:key"`
	Value string `gorm:"column:value"` // backed by a CLOB column
}

// TableName maps FolderProperty to the folder_property table.
func (FolderProperty) TableName() string {
	const name = "folder_property"
	return name
}

// initialize drops and recreates the two demo tables used by the
// reproduction. The DROP statements deliberately ignore errors (the
// tables may not exist on a first run); the CREATE statements panic
// on failure.
//
// NOTE: the original checked db.Error on the root handle after each
// Exec; in gorm v2 each Exec returns its own *gorm.DB session, so the
// error must be read from that return value.
func initialize(db *gorm.DB) {
	// Best-effort cleanup; ignore "table or view does not exist".
	db.Exec("DROP TABLE folder_property CASCADE CONSTRAINTS")
	db.Exec("DROP TABLE folder_data CASCADE CONSTRAINTS")

	// Parent table.
	if err := db.Exec(`
		CREATE TABLE folder_data (
			folder_id VARCHAR2(36) NOT NULL,
			folder_nm VARCHAR2(100 CHAR) NOT NULL,
			CONSTRAINT folder_data_pk PRIMARY KEY (folder_id))
	`).Error; err != nil {
		panic(err)
	}

	// Child table: value is a CLOB, the column involved in the
	// reported ORA-06502 failure.
	if err := db.Exec(`
		CREATE TABLE folder_property (
			blah NUMBER GENERATED BY DEFAULT AS IDENTITY (START WITH 1),
			folder_id VARCHAR2(36) NOT NULL,
			value CLOB,
			key VARCHAR2(100) NOT NULL UNIQUE,
			CONSTRAINT folder_property_pk PRIMARY KEY (folder_id, key),
			CONSTRAINT folder_property_fk01 FOREIGN KEY (folder_id) REFERENCES folder_data (folder_id)
		)
	`).Error; err != nil {
		panic(err)
	}
	// The original called db.Debug() here and discarded the returned
	// session, which has no effect; removed.
}

// main reproduces the CLOB upsert failure: it creates a folder with
// two small properties, then updates one property's value with content
// longer than 4000 characters and calls Save, which fails with
// ORA-06502 in the generated PL/SQL block shown below.
func main() {
	dsn := <REDACTED>

	db, err := gorm.Open(oracle.Dialector{
		Config: &oracle.Config{
			DataSourceName:       dsn,
			SkipQuoteIdentifiers: true,
		},
	}, &gorm.Config{})
	if err != nil {
		panic(err)
	}

	initialize(db)

	// Seed a parent row with two child properties; both values are
	// well under 4000 characters, so this insert succeeds.
	id := uuid.New().String()
	folder := &FolderData{
		ID:   id,
		Name: "My Folder",
		Properties: []FolderProperty{
			{
				ID:    id,
				Key:   "prop1",
				Value: "some string value",
			},
			{
				ID:    id,
				Key:   "prop2",
				Value: "some other string value",
			},
		},
	}
	err = db.Transaction(func(tx *gorm.DB) error {
		if err := tx.Create(&folder).Error; err != nil {
			// NOTE(review): this explicit Rollback is presumably
			// redundant — gorm rolls back automatically when the
			// Transaction callback returns an error; confirm against
			// the gorm transaction docs.
			tx.Rollback()
			return err
		}
		return nil
	})
	if err != nil {
		panic(err)
	}

	createdFolder := &FolderData{}
	db.Model(&FolderData{}).Preload("Properties").First(&createdFolder)
	fmt.Printf("Done! Created record: %v\n", createdFolder)

	// Re-save with a value longer than 4000 characters; the MERGE
	// generated for the association upsert then fails with ORA-06502
	// because the bind collection is declared as VARCHAR2(4000).
	clobContent := `
<TRUNCATED>
	`
	createdFolder.Properties[1].Value = clobContent
	db.Save(&createdFolder)
	fmt.Printf("Done! Updated record: %v\n", createdFolder)
}

Running the example above produces the following error message:

ORA-06502: PL/SQL: value or conversion error: character string buffer too small
ORA-06512: at line 21
Help: https://docs.oracle.com/error-help/db/ora-06502/

With the following PL/SQL block:

-- PL/SQL block generated by the dialector for the Save upsert. All
-- string bind collections are hard-coded as TABLE OF VARCHAR2(4000),
-- so binding a value longer than 4000 characters raises ORA-06502.
-- t_col_2_array (the CLOB "value" column) should be TABLE OF CLOB.
DECLARE
  TYPE t_record IS RECORD (
    blah folder_property.blah%TYPE,
    folder_id folder_property.folder_id%TYPE,
    key folder_property.key%TYPE,
    value folder_property.value%TYPE
  );
  TYPE t_records IS TABLE OF t_record;
  l_affected_records t_records;
  TYPE t_col_0_array IS TABLE OF VARCHAR2(4000);
  l_col_0_array t_col_0_array;
  TYPE t_col_1_array IS TABLE OF VARCHAR2(4000);
  l_col_1_array t_col_1_array;
  -- BUG: "value" is a CLOB column, but its bind collection is
  -- VARCHAR2(4000); elements longer than 4000 characters overflow
  -- here ("character string buffer too small").
  TYPE t_col_2_array IS TABLE OF VARCHAR2(4000);
  l_col_2_array t_col_2_array;
  TYPE t_col_3_array IS TABLE OF NUMBER;
  l_col_3_array t_col_3_array;
BEGIN
  l_col_0_array := t_col_0_array('332d18e3-5540-4672-b0af-4849ba861553', '332d18e3-5540-4672-b0af-4849ba861553');
  l_col_1_array := t_col_1_array('prop1', 'prop2');
  l_col_2_array := t_col_2_array('some string value', '<TRUNCATED>');
  l_col_3_array := t_col_3_array(1, 2);
  FORALL i IN 1..2
    MERGE INTO folder_property t
    USING (SELECT l_col_0_array(i) AS folder_id, l_col_1_array(i) AS key, l_col_2_array(i) AS value, l_col_3_array(i) AS blah FROM DUAL) s
    ON (t.key = s.key)
    WHEN MATCHED THEN UPDATE SET t.folder_id = s.folder_id, t.value = s.value, t.blah = s.blah
    WHEN NOT MATCHED THEN INSERT (folder_id, key, value) VALUES (s.folder_id, s.key, s.value)
    RETURNING blah, folder_id, key, value
    BULK COLLECT INTO l_affected_records;
  -- NOTE(review): the "0 :=" / "'' :=" statements below are not valid
  -- PL/SQL assignment targets; presumably they stand in for OUT-bind
  -- placeholders in the actual generated block.
  IF l_affected_records.COUNT > 0 THEN 0 := l_affected_records(1).blah; END IF;
  IF l_affected_records.COUNT > 0 THEN '' := l_affected_records(1).folder_id; END IF;
  IF l_affected_records.COUNT > 0 THEN '' := l_affected_records(1).key; END IF;
  IF l_affected_records.COUNT > 0 THEN '' := l_affected_records(1).value; END IF;
  IF l_affected_records.COUNT > 1 THEN 0 := l_affected_records(2).blah; END IF;
  IF l_affected_records.COUNT > 1 THEN '' := l_affected_records(2).folder_id; END IF;
  IF l_affected_records.COUNT > 1 THEN '' := l_affected_records(2).key; END IF;
  IF l_affected_records.COUNT > 1 THEN '' := l_affected_records(2).value; END IF;
END;

Defining t_col_2_array as a TABLE OF CLOB seems to solve the problem.

A possible workaround is to update the parent object manually, then update the child object holding the oversized CLOB via bindings, wrapping both operations in a single transaction.

Metadata

Metadata

Assignees

Labels

No labels

Type

No type

Projects

No projects

Milestone

No milestone

Relationships

None yet

Development

No branches or pull requests

Issue actions