【Question Title】: Give permissions for the HDFS target directory using the Java API
【Posted】: 2015-03-11 13:49:21
【Question】:

Whenever I put data into HDFS using the Java API, I first have to grant permissions on the HDFS target directory. I have tried the following code:

import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URI;
import java.net.URL;
import java.util.Scanner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import java.io.IOException;

public class MovetoHdfs {

    public static void main(String[] args)throws Exception {
        MovetoHdfs m=new MovetoHdfs();
        Configuration conf1=new Configuration();
        //Scanner s=new Scanner(System.in);
        BufferedReader br=new BufferedReader(new InputStreamReader(System.in));

        System.out.println("Enter the input path");
        String inp=br.readLine();

        System.out.println("Enter output path");

        String opt=br.readLine();

        //m.moveFile(conf1,inp,opt);

        conf1.set("fs.default.name", "hdfs://10.2.152.113:8020");
        conf1.set("dfs.permissions.enabled", "false");
        conf1.set("dfs.permissions", "false");

        // moveFile(Configuration, ...) opens its own FileSystem from conf1
        m.moveFile(conf1, inp, opt);
    }

    public void moveFile(Configuration conf, final String inputFile, final String outputFile) throws Exception {

        conf.set("fs.default.name", "hdfs://10.2.152.113:8020");
        // These are NameNode-side properties; setting them on the client
        // does not disable the server's permission checks.
        conf.set("dfs.permissions.enabled", "false");
        conf.set("dfs.permissions", "false");

        Path src = new Path(inputFile);
        Path dest = new Path(outputFile);

        FileSystem fs = FileSystem.get(conf);
        //fs.setPermission(dest, fp);
        fs.moveFromLocalFile(src, dest);
        fs.close();
    }

    public void moveFile(final String inputFile, final String outputFile) throws Exception {
        Configuration conf = new Configuration();
        // Set these before FileSystem.get(); changes made afterwards are not
        // seen by the already-created FileSystem (and they are NameNode-side
        // properties in any case).
        conf.set("dfs.permissions.enabled", "false");
        conf.set("dfs.permissions", "false");

        Path src = new Path(inputFile);
        Path dest = new Path(outputFile);

        FileSystem dfs = FileSystem.get(new URI("hdfs://10.2.152.113:8020"), conf);
        dfs.moveFromLocalFile(src, dest);
        dfs.close();
    }
}

I get the following error:

log4j:WARN No appenders could be found for logger (org.apache.hadoop.conf.Configuration.deprecation).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
org.apache.hadoop.security.AccessControlException: Permission denied: user=Srinivas.budida, access=WRITE, inode="/user/hdfs/Test":root:hadoop:-rw-r--r--
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:257)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:238)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:151)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:138)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6286)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6268)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPathAccess(FSNamesystem.java:6193)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2621)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2545)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2430)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:551)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.create(AuthorizationProviderProxyClientProtocol.java:108)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:388)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:587)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1026)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)

    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
    at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:1603)
    at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1461)
    at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1386)
    at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:394)
    at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:390)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:390)
    at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:334)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:906)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:887)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:784)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:365)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:338)
    at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1903)
    at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1871)
    at org.apache.hadoop.fs.FileSystem.moveFromLocalFile(FileSystem.java:1858)
    at com.solix.bigdata.MovetoHdfs.moveFile(MovetoHdfs.java:105)
    at com.solix.bigdata.MovetoHdfs.main(MovetoHdfs.java:46)
Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=Srinivas.budida, access=WRITE, inode="/user/hdfs/Test":root:hadoop:-rw-r--r--
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkFsPermission(DefaultAuthorizationProvider.java:257)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.check(DefaultAuthorizationProvider.java:238)
    at org.apache.hadoop.hdfs.server.namenode.DefaultAuthorizationProvider.checkPermission(DefaultAuthorizationProvider.java:151)
    at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:138)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6286)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:6268)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPathAccess(FSNamesystem.java:6193)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2621)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2545)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2430)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:551)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.create(AuthorizationProviderProxyClientProtocol.java:108)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:388)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:587)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1026)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:415)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)

    at org.apache.hadoop.ipc.Client.call(Client.java:1409)
    at org.apache.hadoop.ipc.Client.call(Client.java:1362)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
    at com.sun.proxy.$Proxy9.create(Unknown Source)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:186)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
    at com.sun.proxy.$Proxy9.create(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.create(ClientNamenodeProtocolTranslatorPB.java:258)
    at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:1599)
    ... 17 more
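
Note: the trace shows the request runs as user=Srinivas.budida, while the target inode /user/hdfs/Test is owned by root:hadoop with mode -rw-r--r--, so the WRITE check fails on the NameNode. Setting dfs.permissions.enabled (or the older dfs.permissions) in the client Configuration has no effect, because permission checking is enforced on the NameNode side. One way to grant access on the target path through the Java API is a chown/chmod performed as a privileged user; the following is a minimal sketch, assuming the cluster uses simple authentication (no Kerberos) so the client may assert the hdfs superuser identity:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;

public class GrantTargetDirAccess {
    public static void main(String[] args) throws Exception {
        // Assert the "hdfs" superuser identity; with simple authentication
        // the NameNode trusts the client-supplied user name.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("hdfs");
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://10.2.152.113:8020");
                FileSystem fs = FileSystem.get(conf);

                Path target = new Path("/user/hdfs/Test");
                // Hand the path over to the real client user...
                fs.setOwner(target, "Srinivas.budida", "hadoop");
                // ...and/or widen its permissions (here: rwxrwxr-x)
                fs.setPermission(target, new FsPermission((short) 0775));

                fs.close();
                return null;
            }
        });
    }
}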

【Question Discussion】:

    Tags: hadoop hdfs


    【Solution 1】:

    Use the code snippet below. It is a simple example of creating a directory on HDFS. Use the dfs object.

    import java.io.IOException;
    import java.net.URISyntaxException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class testinghdfs {

        public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException {
            try {
                // Run the HDFS calls as the "hdfs" superuser so the
                // NameNode's permission checks pass
                UserGroupInformation ugi = UserGroupInformation.createRemoteUser("hdfs");

                ugi.doAs(new PrivilegedExceptionAction<Void>() {
                    public Void run() throws Exception {
                        Configuration config = new Configuration();
                        config.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
                        config.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));

                        config.set("fs.hdfs.impl",
                                org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
                        config.set("fs.file.impl",
                                org.apache.hadoop.fs.LocalFileSystem.class.getName());

                        FileSystem dfs = FileSystem.get(config);

                        String dirName = "TestDirectory";
                        System.out.println(dfs.getWorkingDirectory() + " this is from \n\n");
                        Path src = new Path(dfs.getWorkingDirectory() + "/" + dirName);
                        // dfs.mkdirs(src);

                        dfs.copyFromLocalFile(new Path("/home/inputs/TMPAAA022389.RPT"),
                                new Path("/user/hdfs/test"));
                        return null;
                    }
                });
            } catch (Exception e) {
                // Don't swallow failures silently
                e.printStackTrace();
            }
        }
    }
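
    A note on why this works: with Hadoop's default simple authentication the NameNode trusts whatever user name the client asserts, so createRemoteUser("hdfs") makes the permission checks run as the HDFS superuser; no user is created anywhere. (On a Kerberized cluster this fails; there you would need UserGroupInformation.createProxyUser plus hadoop.proxyuser.* settings on the NameNode.) An alternative sketch, under the same simple-auth assumption, is to set HADOOP_USER_NAME before the first FileSystem.get() call; the class name and local path below are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PutAsHdfsUser {
        public static void main(String[] args) throws Exception {
            // Must be set before the first FileSystem.get() call; the login
            // user is cached after that. Only effective without Kerberos.
            System.setProperty("HADOOP_USER_NAME", "hdfs");

            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://10.2.152.113:8020");

            FileSystem fs = FileSystem.get(conf);
            // Illustrative local path; the HDFS target is from the answer
            fs.copyFromLocalFile(new Path("/path/to/local/file.txt"),
                    new Path("/user/hdfs/test"));
            fs.close();
        }
    }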
    

    【Discussion】:

    • Hi sravan, thanks for the reply. The code above creates a directory in HDFS, but my problem is that the directory already exists in HDFS and I am moving data from local to HDFS with the Java API. Whenever I move the data it fails with permission denied, so is there any way to grant permissions on the target directory while moving the data?
    • I updated the answer. The directory already exists on HDFS, so create a remote user for it in the code and run the copy as that user. Let me know about any problems.
    • Hi Sravan, the code above creates a new user and then we create the directory as that user, but I don't want to create a new user; I just want to become root (I mean, switch the current user to root to copy that particular file) and copy the file. Is that possible? It's urgent, please reply as soon as you can. (A sketch of this follows the comments below.)
    • Can you copy it from the command line, like hadoop fs -put source hdfs-dest?
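
    On the "become root" question above: createRemoteUser does not create any user on the cluster; it only asserts an identity for the RPC, so with simple authentication the same doAs pattern works with "root" (the owner shown in the error). A minimal sketch, reusing the hdfs:// URI from the question; the local and HDFS paths are illustrative:

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class CopyAsRoot {
        public static void main(String[] args) throws Exception {
            // Assert "root" for this RPC; nothing is created on the cluster,
            // the NameNode simply evaluates permissions as this user.
            UserGroupInformation ugi = UserGroupInformation.createRemoteUser("root");
            ugi.doAs(new PrivilegedExceptionAction<Void>() {
                public Void run() throws Exception {
                    Configuration conf = new Configuration();
                    conf.set("fs.defaultFS", "hdfs://10.2.152.113:8020");
                    FileSystem fs = FileSystem.get(conf);
                    // Illustrative paths: local source file -> HDFS target dir
                    fs.copyFromLocalFile(new Path("/path/to/local/file.txt"),
                            new Path("/user/hdfs/Test"));
                    fs.close();
                    return null;
                }
            });
        }
    }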