
Usage and Code Examples of the org.apache.hadoop.mapred.JobID.toString() Method


This article collects a number of Java code examples for the org.apache.hadoop.mapred.JobID.toString() method and shows how JobID.toString() is used in practice. The examples come from curated projects on platforms such as GitHub, Stack Overflow, and Maven, so they carry real reference value and should be helpful. Details of the JobID.toString() method are as follows:

Package: org.apache.hadoop.mapred
Class: JobID
Method: toString

Introduction to JobID.toString

Returns the string representation w/o prefix.
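Before the collected examples, here is a minimal, self-contained sketch of what the method returns. It is not taken from any of the projects below; the demo class name and the JobTracker identifier and job number are invented for illustration.

import org.apache.hadoop.mapred.JobID;

public class JobIdToStringDemo {
  public static void main(String[] args) {
    // Hypothetical values: "200707121733" stands in for the JobTracker
    // start-time identifier, 3 for the sequential job number.
    JobID jobId = new JobID("200707121733", 3);
    // Prints the canonical form "job_<identifier>_<number>",
    // here: job_200707121733_0003
    System.out.println(jobId.toString());
    // JobID.forName() parses the canonical string back into an equal JobID.
    JobID parsed = JobID.forName(jobId.toString());
    System.out.println(parsed.equals(jobId)); // true
  }
}

This canonical string is what the examples below feed into configuration keys such as mapred.job.id and into status and stats reporting APIs.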

Code Examples

Code example source: apache/hive

private void updateMapRedStatsJson(MapRedStats stats, RunningJob rj)
    throws IOException, JSONException {
  if (statsJSON == null) {
    statsJSON = new JSONObject();
  }
  if (stats != null) {
    if (stats.getNumMap() >= 0) {
      statsJSON.put(NUMBER_OF_MAPPERS, stats.getNumMap());
    }
    if (stats.getNumReduce() >= 0) {
      statsJSON.put(NUMBER_OF_REDUCERS, stats.getNumReduce());
    }
    if (stats.getCounters() != null) {
      statsJSON.put(COUNTERS, getCountersJson(stats.getCounters()));
    }
  }
  if (rj != null) {
    statsJSON.put(JOB_ID, rj.getID().toString());
    statsJSON.put(JOB_FILE, rj.getJobFile());
    statsJSON.put(TRACKING_URL, rj.getTrackingURL());
    statsJSON.put(MAP_PROGRESS, Math.round(rj.mapProgress() * 100));
    statsJSON.put(REDUCE_PROGRESS, Math.round(rj.reduceProgress() * 100));
    statsJSON.put(CLEANUP_PROGRESS, Math.round(rj.cleanupProgress() * 100));
    statsJSON.put(SETUP_PROGRESS, Math.round(rj.setupProgress() * 100));
    statsJSON.put(COMPLETE, rj.isComplete());
    statsJSON.put(SUCCESSFUL, rj.isSuccessful());
  }
}

Code example source: Qihoo360/XLearning

jobConf.set("mapred.tip.id", taId.getTaskID().toString());jobConf.set("mapred.task.id", taId.toString());jobConf.set("mapred.job.id", jobID.toString());amClient.reportMapedTaskID(containerId, taId.toString());RecordWriter writer = outputFormat.getRecordWriter(dfs, jobConf, "part-r", Reporter.NULL);

Code example source: apache/hive

public List listJobs(String user, boolean showall, String jobId,
                     int numRecords, boolean showDetails)
    throws NotAuthorizedException, BadParam, IOException, InterruptedException {
  UserGroupInformation ugi = null;
  WebHCatJTShim tracker = null;
  ArrayList ids = new ArrayList();
  try {
    ugi = UgiFactory.getUgi(user);
    tracker = ShimLoader.getHadoopShims().getWebHCatShim(appConf, ugi);
    JobStatus[] jobs = tracker.getAllJobs();
    if (jobs != null) {
      for (JobStatus job : jobs) {
        String id = job.getJobID().toString();
        if (showall || user.equals(job.getUsername()))
          ids.add(id);
      }
    }
  } catch (IllegalStateException e) {
    throw new BadParam(e.getMessage());
  } finally {
    if (tracker != null)
      tracker.close();
    if (ugi != null)
      FileSystem.closeAllForUGI(ugi);
  }
  return getJobStatus(ids, user, showall, jobId, numRecords, showDetails);
}

Code example source: apache/hive

String jobId = t.getTaskAttemptId().getJobID().toString();
if (firstError) {
  console.printError("Examining task ID: " + taskId + " (and more) from job " + jobId);

Code example source: apache/hive

"' with jobID=" + rj.getID() + " compaction ID=" + id);try { msc.setHadoopJobid(rj.getID().toString(), id);} catch (TException e) { LOG.warn("Error setting hadoop job, jobId=" + rj.getID().toString() + " compactiOnId=" + id, e);

Code example source: apache/drill

String jobId = t.getTaskAttemptId().getJobID().toString();
if (firstError) {
  console.printError("Examining task ID: " + taskId + " (and more) from job " + jobId);

Code example source: apache/drill

RunningJob rj = new JobClient(job).submitJob(job);
LOG.info("Submitted compaction job '" + job.getJobName() +
    "' with jobID=" + rj.getID() + " compaction ID=" + id);
txnHandler.setHadoopJobId(rj.getID().toString(), id);
rj.waitForCompletion();
if (!rj.isSuccessful()) {

Code example source: apache/hive

/**
 * Create a new QueueStatusBean
 *
 * @param state store job state
 * @param status job status
 * @param profile job profile
 */
public QueueStatusBean(JobState state, JobStatus status, JobProfile profile)
    throws IOException {
  this.status = status;
  this.profile = profile;
  id = profile.getJobID().toString();
  parentId = state.getParent();
  percentComplete = state.getPercentComplete();
  exitValue = state.getExitValue();
  user = profile.getUser();
  callback = state.getCallback();
  completed = state.getCompleteStatus();
  userargs = state.getUserArgs();
}

Code example source: twitter/ambrose

    TaskReport[] reduceTaskReport) throws IOException {
  jobId = runningJob.getID().toString();
  jobName = runningJob.getJobName();
  trackingURL = runningJob.getTrackingURL();

Code example source: apache/drill

for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
  try {
    clientStatPublisher.run(exctractedCounters, rj.getID().toString());
  } catch (RuntimeException runtimeException) {
    LOG.error("Exception " + runtimeException.getClass().getCanonicalName()

MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());
mapRedStats.setCounters(ctrs);

Code example source: apache/hive

    job, numMap, numReduce, cpuMsec, false, rj.getID().toString());
updateMapRedTaskWebUIStatistics(mapRedStats, rj);
for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
  try {
    clientStatPublisher.run(exctractedCounters, rj.getID().toString());
  } catch (RuntimeException runtimeException) {
    LOG.error("Exception " + runtimeException.getClass().getCanonicalName()

Code example source: apache/hive

killJob();
jobID = rj.getID().toString();

Code example source: apache/hive

/**
 * from StreamJob.java.
 */
public void jobInfo(RunningJob rj) {
  if (ShimLoader.getHadoopShims().isLocalMode(job)) {
    console.printInfo("Job running in-process (local Hadoop)");
  } else {
    if (SessionState.get() != null) {
      SessionState.get().getHiveHistory().setTaskProperty(queryId, getId(),
          Keys.TASK_HADOOP_ID, rj.getID().toString());
    }
    console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = "
        + rj.getTrackingURL());
    console.printInfo("Kill Command = " + HiveConf.getVar(job, ConfVars.MAPREDBIN)
        + " job -kill " + rj.getID());
  }
}

Code example source: apache/drill

rj.killJob();
jobID = rj.getID().toString();

Code example source: apache/drill

/**
 * from StreamJob.java.
 */
public void jobInfo(RunningJob rj) {
  if (ShimLoader.getHadoopShims().isLocalMode(job)) {
    console.printInfo("Job running in-process (local Hadoop)");
  } else {
    if (SessionState.get() != null) {
      SessionState.get().getHiveHistory().setTaskProperty(queryId, getId(),
          Keys.TASK_HADOOP_ID, rj.getID().toString());
    }
    console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = "
        + rj.getTrackingURL());
    console.printInfo("Kill Command = " + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN)
        + " job -kill " + rj.getID());
  }
}

Code example source: org.apache.hadoop/hadoop-mapreduce-client-core

when(jobID.toString()).thenReturn(TEST_JOB_ID);

Code example source: io.hops/hadoop-mapreduce-client-core

/**
 * @deprecated use getJobID() instead
 */
@Deprecated
public String getJobId() {
  return jobid.toString();
}

Code example source: org.apache.hadoop/hadoop-mapreduce-client-common

/**
 * Get the done summary file name for a job.
 * @param jobId the jobId.
 * @return the conf file name.
 */
public static String getIntermediateSummaryFileName(JobId jobId) {
  return TypeConverter.fromYarn(jobId).toString() + SUMMARY_FILE_NAME_SUFFIX;
}

Code example source: org.apache.hadoop/hadoop-mapreduce-client-common

/**
 * Get the job history file path for non Done history files.
 */
public static Path getStagingJobHistoryFile(Path dir, JobId jobId, int attempt) {
  return getStagingJobHistoryFile(dir,
      TypeConverter.fromYarn(jobId).toString(), attempt);
}

Code example source: io.hops/hadoop-mapreduce-client-core

/**
 * Localize the given JobConf to be specific for this task.
 */
public void localizeConfiguration(JobConf conf) throws IOException {
  conf.set(JobContext.TASK_ID, taskId.getTaskID().toString());
  conf.set(JobContext.TASK_ATTEMPT_ID, taskId.toString());
  conf.setBoolean(JobContext.TASK_ISMAP, isMapTask());
  conf.setInt(JobContext.TASK_PARTITION, partition);
  conf.set(JobContext.ID, taskId.getJobID().toString());
}
