User creates a schema and a table. A unique constraint is added on the empty table, then dropped. A record is inserted into the table. A subsequent attempt to re-add the constraint returns ERROR[8448].
SQL>create schema sch002;
--- SQL operation complete.
SQL>set schema sch002;
--- SQL operation complete.
SQL>create table tab1(a int not null primary key, b int, c int, constraint cons41 unique(b));
--- SQL operation complete.
SQL>alter table tab1 add constraint cons42 unique(c);
--- SQL operation complete.
SQL>alter table tab1 drop constraint cons42;
--- SQL operation complete.
SQL>insert into tab1 values(1,2,3);
--- 1 row(s) inserted.
SQL>alter table tab1 add constraint cons42 unique(c);
*** ERROR[8448] Unable to access Hbase interface. Call to ExpHbaseInterface::addToHFile returned error HBASE_ADD_TO_HFILE_ERROR(-712). Cause:
org.apache.hadoop.security.AccessControlException: Permission denied: user=trafodion, access=WRITE, inode="/":hdfs:supergroup:drwxr-xr-x
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkFsPermission(FSPermissionChecker.java:271)
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:257)
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:238)
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:179)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:5607)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:5589)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:5563)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2282)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2235)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2188)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:505)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:354)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1026)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1986)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1982)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1554)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1980)
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
java.lang.reflect.Constructor.newInstance(Constructor.java:526)
org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:1603)
org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1461)
org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1386)
org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:394)
org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:390)
org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:390)
org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:334)
org.apache.hadoop.hbase.util.FSUtils.create(FSUtils.java:341)
org.apache.hadoop.hbase.util.FSUtils.create(FSUtils.java:316)
org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter.createOutputStream(AbstractHFileWriter.java:266)
org.apache.hadoop.hbase.io.hfile.HFile$WriterFactory.create(HFile.java:391)
org.trafodion.sql.HBaseAccess.HBulkLoadClient.doCreateHFile(HBulkLoadClient.java:193)
org.trafodion.sql.HBaseAccess.HBulkLoadClient.addToHFile(HBulkLoadClient.java:229)
.
*** ERROR[1029] Object TRAFODION.SCH002.CONS42 could not be created.
SQL>
This error shows up during the 'create unique index' step, which is performed internally as part of unique constraint creation.
During index creation, a bulk load statement is executed to populate the index with rows from the base table.
The temp HFile appears to hit a permission problem when it is created in HDFS: the AccessControlException reports user=trafodion needing WRITE access on inode="/", so the temp HFile path seems to resolve to the HDFS root, which is owned by hdfs:supergroup and is not writable by the trafodion user.
This error does not show up on an empty table, since create index skips the load in that case.
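One way to confirm that the failure is in the bulk load path itself, rather than in the constraint logic, is to run a standalone LOAD into a scratch copy of the table. A hypothetical session (tab2 is an illustrative name):
SQL>create table tab2 like tab1;
SQL>load into tab2 select * from tab1;
If the HDFS permission problem is the cause, the LOAD should fail with the same HBASE_ADD_TO_HFILE_ERROR, since it creates its temp HFiles the same way.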
Are you able to do a 'create unique index' on that same column?
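For example, with an illustrative index name:
SQL>create unique index idx_c on tab1(c);
On the populated table this drives the same index load, so if the permission issue is the cause it should fail with the same ERROR[8448].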